From 52ff62f4b65e9df5afd98d46d80e19d2a9a691e0 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 10 Apr 2026 17:35:47 -0400 Subject: [PATCH 01/17] feat: LeanCtorScalar trait for easier field access --- Tests/FFI.lean | 12 ++++ Tests/Gen.lean | 18 +++++ src/object.rs | 177 +++++++++++++++++++++++++++++++++++++++++++++++- src/test_ffi.rs | 42 ++++++++++++ 4 files changed, 247 insertions(+), 2 deletions(-) diff --git a/Tests/FFI.lean b/Tests/FFI.lean index d43a1f2..d87e6a8 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -64,6 +64,12 @@ opaque roundtripExtScalarStruct : @& ExtScalarStruct → ExtScalarStruct @[extern "rs_roundtrip_usize_struct"] opaque roundtripUSizeStruct : @& USizeStruct → USizeStruct +@[extern "rs_roundtrip_bool_struct"] +opaque roundtripBoolStruct : @& BoolStruct → BoolStruct + +@[extern "rs_roundtrip_multi_u32_struct"] +opaque roundtripMultiU32Struct : @& MultiU32Struct → MultiU32Struct + @[extern "rs_roundtrip_float"] opaque roundtripFloat : Float → Float @@ -359,6 +365,12 @@ def borrowedRoundtripTests : TestSeq := test "ExtScalarStruct max" (show Bool from roundtripExtScalarStruct ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩ == ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩) ++ test "USizeStruct zeros" (roundtripUSizeStruct ⟨0, 0, 0⟩ == ⟨0, 0, 0⟩) ++ test "USizeStruct mixed" (roundtripUSizeStruct ⟨42, 99, 255⟩ == ⟨42, 99, 255⟩) ++ + test "BoolStruct all false" (roundtripBoolStruct ⟨0, false, false, false⟩ == ⟨0, false, false, false⟩) ++ + test "BoolStruct all true" (roundtripBoolStruct ⟨42, true, true, true⟩ == ⟨42, true, true, true⟩) ++ + test "BoolStruct mixed" (roundtripBoolStruct ⟨7, true, false, true⟩ == ⟨7, true, false, true⟩) ++ + test "MultiU32Struct zeros" (roundtripMultiU32Struct ⟨0, 0, 0, 0⟩ == ⟨0, 0, 0, 0⟩) ++ + test "MultiU32Struct max" (roundtripMultiU32Struct ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩ == ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF⟩) ++ + test "MultiU32Struct mixed" (roundtripMultiU32Struct ⟨1, 42, 0, 99⟩ == ⟨1, 42, 0, 99⟩) ++ test "External all fields" (externalAllFields (mkRustData 42 99 "hello") == "42:99:hello") ++ test "External all fields zeros" (externalAllFields (mkRustData 0 0 "") == "0:0:") ++ test "External large u64" (rustDataGetX (mkRustData 0xFFFFFFFFFFFFFFFF 0 "test") == 0xFFFFFFFFFFFFFFFF) ++ diff --git a/Tests/Gen.lean b/Tests/Gen.lean index 59337e8..e549e8f 100644 --- a/Tests/Gen.lean +++ b/Tests/Gen.lean @@ -122,6 +122,24 @@ structure USizeStruct where u8val : UInt8 deriving Repr, BEq, DecidableEq, Inhabited +/-- Structure with multiple Bool scalar fields for batch-read/write testing. + Layout: 1 object field (obj : Nat), then 3 Bool scalars. -/ +structure BoolStruct where + obj : Nat + b1 : Bool + b2 : Bool + b3 : Bool +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure with multiple UInt32 scalar fields for batch-read/write testing. + Layout: 1 object field (obj : Nat), then 3 UInt32 scalars. -/ +structure MultiU32Struct where + obj : Nat + a : UInt32 + b : UInt32 + c : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + /-! ## Shrinkable instances -/ instance : Shrinkable Nat where diff --git a/src/object.rs b/src/object.rs index ac4158f..f6cb164 100644 --- a/src/object.rs +++ b/src/object.rs @@ -896,6 +896,41 @@ impl From> for LeanOwned { // [obj_0, obj_1, ..., obj_{n-1}] [usize_0, ...] [scalar bytes (descending size)] /// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). +/// +/// # Constructor field kinds +/// +/// Lean constructor fields are either: +/// +/// - **Object fields**: `lean_object*` values — either a pointer to a heap +/// object (lowest bit 0) or a boxed immediate value (lowest bit 1). +/// +/// - **Scalar fields**: values stored inline as their unboxed C types +/// (`uint8_t`, `uint32_t`, `double`, etc.) rather than as `lean_object*`. 
+/// For example, `Bool` is stored as an unboxed `uint8_t` when it appears +/// directly in a constructor, but as a boxed `lean_object*` in polymorphic +/// contexts like `Array Bool`. +/// +/// # Data area layout +/// +/// After the 8-byte header, a constructor's data area begins with a contiguous +/// array of pointer-width entries accessed via `lean_ctor_obj_cptr`. A single +/// pointer-width entry in this array is called a **slot**. The full layout is: +/// +/// 1. Object field slots (one `lean_object*` per field) +/// 2. `USize` slots (one `usize` per field) +/// 3. Fixed-size scalar bytes, sorted by descending size +/// (u64/f64, u32/f32, u16, u8/bool) +/// +/// # Offset conventions +/// +/// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), `offset` is +/// a byte offset into the data area and consecutive fields of the same type are +/// `size_of::()` bytes apart. +/// +/// For `usize`, `offset` is a slot index (see above). `USize` slots sit right +/// after object field slots, so `lean_ctor_get_usize(o, n + i)` retrieves the +/// `i`-th `USize` field given `n` object field slots. Consecutive `usize` +/// fields are 1 slot apart. #[repr(transparent)] pub struct LeanCtor(R); @@ -979,8 +1014,7 @@ impl LeanCtor { include::lean_ctor_get_float32(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) } } - /// Read a `usize` at slot `slot` past `num_objs` object fields. - /// Uses a **slot index** (not byte offset). + /// Read a `usize` at `slot` after `num_objs` object fields. #[allow(clippy::cast_possible_truncation)] pub fn get_usize(&self, num_objs: usize, slot: usize) -> usize { unsafe { include::lean_ctor_get_usize(self.0.as_raw(), (num_objs + slot) as u32) } @@ -988,6 +1022,24 @@ impl LeanCtor { pub fn get_bool(&self, num_objs: usize, offset: usize) -> bool { self.get_u8(num_objs, offset) != 0 } + + /// Read `N` consecutive scalar fields of type `T` starting at `offset`. 
+ /// + /// `num_objs` is the number of object fields in this constructor. + /// `offset` is a byte offset for fixed-size scalars, or a slot index for + /// `usize` (see [`LeanCtorScalar`] for details on the data area layout). + pub fn scalars( + &self, + num_objs: usize, + offset: usize, + ) -> [T; N] { + std::array::from_fn(|i| T::ctor_get(self, num_objs, offset + i * T::OFFSET)) + } + + /// Convenience alias for `scalars::`. + pub fn get_bools(&self, num_objs: usize, offset: usize) -> [bool; N] { + self.scalars(num_objs, offset) + } } impl LeanCtor { @@ -1096,6 +1148,27 @@ impl LeanCtor { pub fn set_bool(&self, num_objs: usize, offset: usize, val: bool) { self.set_u8(num_objs, offset, val as u8); } + + /// Write `N` consecutive scalar fields of type `T` starting at `offset`. + /// + /// `num_objs` is the number of object fields in this constructor. + /// `offset` is a byte offset for fixed-size scalars, or a slot index for + /// `usize` (see [`LeanCtorScalar`] for details on the data area layout). + pub fn set_scalars( + &self, + num_objs: usize, + offset: usize, + vals: [T; N], + ) { + for (i, val) in vals.into_iter().enumerate() { + T::ctor_set(self, num_objs, offset + i * T::OFFSET, val); + } + } + + /// Convenience alias for `set_scalars::`. + pub fn set_bools(&self, num_objs: usize, offset: usize, vals: [bool; N]) { + self.set_scalars(num_objs, offset, vals); + } } impl From> for LeanOwned { @@ -1108,6 +1181,106 @@ impl From> for LeanOwned { } } +// ----------------------------------------------------------------------------- +// LeanCtorScalar — trait for batch scalar field access +// ----------------------------------------------------------------------------- + +/// Trait for scalar types that can be batch-read/written from a [`LeanCtor`]. +/// +/// See [`LeanCtor`] for the full data area layout and offset conventions. 
+pub trait LeanCtorScalar: Copy { + /// Distance between consecutive fields of this type: + /// `size_of::()` bytes for fixed-size scalars, or 1 slot for `usize`. + const OFFSET: usize; + + /// Read one scalar field at `offset`. + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self; + + /// Write one scalar field at `offset` (owned ctor only). + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self); +} + +impl LeanCtorScalar for bool { + const OFFSET: usize = 1; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_bool(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_bool(num_objs, offset, val); + } +} + +impl LeanCtorScalar for u8 { + const OFFSET: usize = 1; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_u8(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_u8(num_objs, offset, val); + } +} + +impl LeanCtorScalar for u16 { + const OFFSET: usize = 2; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_u16(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_u16(num_objs, offset, val); + } +} + +impl LeanCtorScalar for u32 { + const OFFSET: usize = 4; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_u32(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_u32(num_objs, offset, val); + } +} + +impl LeanCtorScalar for u64 { + const OFFSET: usize = 8; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_u64(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_u64(num_objs, offset, val); + } +} + +impl LeanCtorScalar for f32 { + const OFFSET: usize = 4; + fn ctor_get(ctor: &LeanCtor, 
num_objs: usize, offset: usize) -> Self { + ctor.get_f32(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_f32(num_objs, offset, val); + } +} + +impl LeanCtorScalar for f64 { + const OFFSET: usize = 8; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_f64(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_f64(num_objs, offset, val); + } +} + +impl LeanCtorScalar for usize { + // USize fields are indexed by slot, so offset is 1 slot. + const OFFSET: usize = 1; + fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { + ctor.get_usize(num_objs, offset) + } + fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { + ctor.set_usize(num_objs, offset, val); + } +} + // ============================================================================= // LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) // ============================================================================= diff --git a/src/test_ffi.rs b/src/test_ffi.rs index f1c86e9..df8a652 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -34,6 +34,10 @@ crate::lean_domain_type! { LeanUSizeStruct; /// Lean `RustData` — opaque external object LeanRustData; + /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool + LeanBoolStruct; + /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 + LeanMultiU32Struct; } /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). 
@@ -410,6 +414,44 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( LeanUSizeStruct::new(out.into()) } +// ============================================================================= +// BoolStruct / MultiU32Struct roundtrips (batch scalar access) +// ============================================================================= + +/// Round-trip a BoolStruct using `scalars` / `set_scalars`. +/// Lean layout: 1 obj field, then 3 Bool scalars at offsets 0, 1, 2. +/// Total scalar size: 3 bytes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_bool_struct( + ptr: LeanBoolStruct>, +) -> LeanBoolStruct { + let ctor = ptr.as_ctor(); + let obj_nat = Nat::from_obj(&ctor.get(0)); + let [b1, b2, b3] = ctor.scalars::<3, bool>(1, 0); + + let out = LeanCtor::alloc(0, 1, 3); + out.set(0, build_nat(&obj_nat)); + out.set_scalars::<3, bool>(1, 0, [b1, b2, b3]); + LeanBoolStruct::new(out.into()) +} + +/// Round-trip a MultiU32Struct using `scalars` / `set_scalars`. +/// Lean layout: 1 obj field, then 3 UInt32 scalars at offsets 0, 4, 8. +/// Total scalar size: 12 bytes. 
+#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( + ptr: LeanMultiU32Struct>, +) -> LeanMultiU32Struct { + let ctor = ptr.as_ctor(); + let obj_nat = Nat::from_obj(&ctor.get(0)); + let [a, b, c] = ctor.scalars::<3, u32>(1, 0); + + let out = LeanCtor::alloc(0, 1, 12); + out.set(0, build_nat(&obj_nat)); + out.set_scalars::<3, u32>(1, 0, [a, b, c]); + LeanMultiU32Struct::new(out.into()) +} + // ============================================================================= // Float / Float32 / USize scalar roundtrips // ============================================================================= From 16732fe795a3392624e63c3cd701499b6b921b46 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 11:12:39 -0400 Subject: [PATCH 02/17] Simplify API --- src/object.rs | 285 +++++++++++++++++++++++------------------------- src/test_ffi.rs | 71 +++++++----- 2 files changed, 175 insertions(+), 181 deletions(-) diff --git a/src/object.rs b/src/object.rs index f6cb164..05562bd 100644 --- a/src/object.rs +++ b/src/object.rs @@ -36,6 +36,12 @@ use tags::*; /// Constructor tag for `IO.Error.userError`. const IO_ERROR_USER_ERROR_TAG: u8 = 7; +/// Convert a `usize` to `u32` for the Lean C API, panicking on overflow. +#[inline] +fn to_u32(val: usize) -> u32 { + u32::try_from(val).expect("value exceeds u32::MAX") +} + // ============================================================================= // LeanRef trait — shared interface for owned and borrowed references // ============================================================================= @@ -924,13 +930,20 @@ impl From> for LeanOwned { /// # Offset conventions /// /// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), `offset` is -/// a byte offset into the data area and consecutive fields of the same type are -/// `size_of::()` bytes apart. 
+/// an **absolute byte offset** from `lean_ctor_obj_cptr` — the same convention +/// as the Lean C API functions `lean_ctor_get_uint8`, `lean_ctor_set_uint32`, +/// etc. Use [`scalar_base`](Self::scalar_base) to compute where the scalar +/// area starts: +/// +/// ```ignore +/// let base = ctor.scalar_base(0); // 0 USize fields; obj count from header +/// let x = ctor.get_u64(base + 0); +/// let y = ctor.get_u32(base + 8); +/// ``` /// -/// For `usize`, `offset` is a slot index (see above). `USize` slots sit right -/// after object field slots, so `lean_ctor_get_usize(o, n + i)` retrieves the -/// `i`-th `USize` field given `n` object field slots. Consecutive `usize` -/// fields are 1 slot apart. +/// For `usize`, the accessor takes a 0-based **slot index**. `USize` slots sit +/// right after object field slots; the header-derived object count is added +/// internally, so `get_usize(0)` reads the first `USize` field. #[repr(transparent)] pub struct LeanCtor(R); @@ -956,9 +969,8 @@ impl LeanCtor { /// Get a borrowed reference to the `i`-th object field. pub fn get(&self, i: usize) -> LeanBorrowed<'_> { - #[allow(clippy::cast_possible_truncation)] LeanBorrowed( - unsafe { include::lean_ctor_get(self.0.as_raw(), i as u32) }, + unsafe { include::lean_ctor_get(self.0.as_raw(), to_u32(i)) }, PhantomData, ) } @@ -972,73 +984,71 @@ impl LeanCtor { // ------------------------------------------------------------------------- // Scalar field readers // ------------------------------------------------------------------------- - // - // `num_objs` is the number of non-scalar fields (object + usize) preceding - // the scalar area. `offset` is a byte offset within the scalar area. - // For `get_usize`, `slot` is a slot index (not byte offset). - /// Compute the absolute byte offset for a scalar field. - #[allow(clippy::cast_possible_truncation)] + /// Number of object fields, read from the constructor header. 
#[inline] - fn scalar_offset(num_objs: usize, offset: usize) -> u32 { - (num_objs * 8 + offset) as u32 + pub fn num_objs(&self) -> usize { + unsafe { include::lean_ctor_num_objs(self.0.as_raw()) as usize } } - pub fn get_u8(&self, num_objs: usize, offset: usize) -> u8 { - unsafe { - include::lean_ctor_get_uint8(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields + /// begin. Pass `num_usize` = number of `USize` fields in this constructor + /// (0 for most types). The object field count is read from the header. + /// + /// Use the returned value as a base when calling scalar accessors: + /// ```ignore + /// let base = ctor.scalar_base(0); + /// let x = ctor.get_u64(base + 0); + /// let y = ctor.get_u32(base + 8); + /// ``` + #[inline] + pub fn scalar_base(&self, num_usize: usize) -> usize { + (self.num_objs() + num_usize) * std::mem::size_of::() } - pub fn get_u16(&self, num_objs: usize, offset: usize) -> u16 { - unsafe { - include::lean_ctor_get_uint16(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + + /// All scalar accessors below take `offset` as an absolute byte offset + /// from `lean_ctor_obj_cptr`, matching the Lean C API convention for + /// `lean_ctor_get_uint8`, `lean_ctor_get_uint32`, etc. 
+ pub fn get_u8(&self, offset: usize) -> u8 { + unsafe { include::lean_ctor_get_uint8(self.0.as_raw(), to_u32(offset)) } } - pub fn get_u32(&self, num_objs: usize, offset: usize) -> u32 { - unsafe { - include::lean_ctor_get_uint32(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + pub fn get_u16(&self, offset: usize) -> u16 { + unsafe { include::lean_ctor_get_uint16(self.0.as_raw(), to_u32(offset)) } } - pub fn get_u64(&self, num_objs: usize, offset: usize) -> u64 { - unsafe { - include::lean_ctor_get_uint64(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + pub fn get_u32(&self, offset: usize) -> u32 { + unsafe { include::lean_ctor_get_uint32(self.0.as_raw(), to_u32(offset)) } } - pub fn get_f64(&self, num_objs: usize, offset: usize) -> f64 { - unsafe { - include::lean_ctor_get_float(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + pub fn get_u64(&self, offset: usize) -> u64 { + unsafe { include::lean_ctor_get_uint64(self.0.as_raw(), to_u32(offset)) } } - pub fn get_f32(&self, num_objs: usize, offset: usize) -> f32 { - unsafe { - include::lean_ctor_get_float32(self.0.as_raw(), Self::scalar_offset(num_objs, offset)) - } + pub fn get_f64(&self, offset: usize) -> f64 { + unsafe { include::lean_ctor_get_float(self.0.as_raw(), to_u32(offset)) } + } + pub fn get_f32(&self, offset: usize) -> f32 { + unsafe { include::lean_ctor_get_float32(self.0.as_raw(), to_u32(offset)) } } - /// Read a `usize` at `slot` after `num_objs` object fields. - #[allow(clippy::cast_possible_truncation)] - pub fn get_usize(&self, num_objs: usize, slot: usize) -> usize { - unsafe { include::lean_ctor_get_usize(self.0.as_raw(), (num_objs + slot) as u32) } + /// Read a `usize` at the given slot index (0-based, relative to the first + /// `USize` slot after object fields). The object field count is read from + /// the header and added internally. 
+ pub fn get_usize(&self, slot: usize) -> usize { + unsafe { include::lean_ctor_get_usize(self.0.as_raw(), to_u32(self.num_objs() + slot)) } } - pub fn get_bool(&self, num_objs: usize, offset: usize) -> bool { - self.get_u8(num_objs, offset) != 0 + pub fn get_bool(&self, offset: usize) -> bool { + self.get_u8(offset) != 0 } /// Read `N` consecutive scalar fields of type `T` starting at `offset`. /// - /// `num_objs` is the number of object fields in this constructor. - /// `offset` is a byte offset for fixed-size scalars, or a slot index for - /// `usize` (see [`LeanCtorScalar`] for details on the data area layout). - pub fn scalars( - &self, - num_objs: usize, - offset: usize, - ) -> [T; N] { - std::array::from_fn(|i| T::ctor_get(self, num_objs, offset + i * T::OFFSET)) + /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for + /// fixed-size scalars (use [`scalar_base`](Self::scalar_base) to compute + /// it), or a slot index for `usize`. + pub fn scalars(&self, offset: usize) -> [T; N] { + std::array::from_fn(|i| T::ctor_get(self, offset + i * T::OFFSET)) } /// Convenience alias for `scalars::`. - pub fn get_bools(&self, num_objs: usize, offset: usize) -> [bool; N] { - self.scalars(num_objs, offset) + pub fn get_bools(&self, offset: usize) -> [bool; N] { + self.scalars(offset) } } @@ -1055,18 +1065,17 @@ impl LeanCtor { /// Allocate a new constructor object. pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { - #[allow(clippy::cast_possible_truncation)] - let obj = - unsafe { include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) }; + let obj = unsafe { + include::lean_alloc_ctor(u32::from(tag), to_u32(num_objs), to_u32(scalar_size)) + }; Self(LeanOwned(obj)) } /// Set the `i`-th object field. Takes ownership of `val`. 
pub fn set(&self, i: usize, val: impl Into) { let val: LeanOwned = val.into(); - #[allow(clippy::cast_possible_truncation)] unsafe { - include::lean_ctor_set(self.0.as_raw(), i as u32, val.into_raw()); + include::lean_ctor_set(self.0.as_raw(), to_u32(i), val.into_raw()); } } @@ -1082,92 +1091,66 @@ impl LeanCtor { // ------------------------------------------------------------------------- // Scalar field setters (owned only — mutation) // ------------------------------------------------------------------------- + // + // All setters take `offset` as an absolute byte offset from + // `lean_ctor_obj_cptr`, matching the Lean C API convention. - pub fn set_u8(&self, num_objs: usize, offset: usize, val: u8) { + pub fn set_u8(&self, offset: usize, val: u8) { unsafe { - include::lean_ctor_set_uint8( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); } } - pub fn set_u16(&self, num_objs: usize, offset: usize, val: u16) { + pub fn set_u16(&self, offset: usize, val: u16) { unsafe { - include::lean_ctor_set_uint16( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); } } - pub fn set_u32(&self, num_objs: usize, offset: usize, val: u32) { + pub fn set_u32(&self, offset: usize, val: u32) { unsafe { - include::lean_ctor_set_uint32( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); } } - pub fn set_u64(&self, num_objs: usize, offset: usize, val: u64) { + pub fn set_u64(&self, offset: usize, val: u64) { unsafe { - include::lean_ctor_set_uint64( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); } } - pub fn set_f64(&self, num_objs: usize, offset: usize, val: f64) { + pub fn set_f64(&self, offset: usize, val: f64) { 
unsafe { - include::lean_ctor_set_float( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); } } - pub fn set_f32(&self, num_objs: usize, offset: usize, val: f32) { + pub fn set_f32(&self, offset: usize, val: f32) { unsafe { - include::lean_ctor_set_float32( - self.0.as_raw(), - Self::scalar_offset(num_objs, offset), - val, - ); + include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); } } - /// Set a `usize` at slot `slot` past `num_objs` object fields. - /// Uses a **slot index** (not byte offset). - #[allow(clippy::cast_possible_truncation)] - pub fn set_usize(&self, num_objs: usize, slot: usize, val: usize) { + /// Set a `usize` at the given slot index (0-based, relative to the first + /// `USize` slot after object fields). The object field count is read from + /// the header and added internally. + pub fn set_usize(&self, slot: usize, val: usize) { unsafe { - include::lean_ctor_set_usize(self.0.as_raw(), (num_objs + slot) as u32, val); + include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + slot), val); } } - pub fn set_bool(&self, num_objs: usize, offset: usize, val: bool) { - self.set_u8(num_objs, offset, val as u8); + pub fn set_bool(&self, offset: usize, val: bool) { + self.set_u8(offset, val as u8); } /// Write `N` consecutive scalar fields of type `T` starting at `offset`. /// - /// `num_objs` is the number of object fields in this constructor. - /// `offset` is a byte offset for fixed-size scalars, or a slot index for - /// `usize` (see [`LeanCtorScalar`] for details on the data area layout). - pub fn set_scalars( - &self, - num_objs: usize, - offset: usize, - vals: [T; N], - ) { + /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for + /// fixed-size scalars (use [`scalar_base`](LeanCtor::scalar_base) to + /// compute it), or a slot index for `usize`. 
+ pub fn set_scalars(&self, offset: usize, vals: [T; N]) { for (i, val) in vals.into_iter().enumerate() { - T::ctor_set(self, num_objs, offset + i * T::OFFSET, val); + T::ctor_set(self, offset + i * T::OFFSET, val); } } /// Convenience alias for `set_scalars::`. - pub fn set_bools(&self, num_objs: usize, offset: usize, vals: [bool; N]) { - self.set_scalars(num_objs, offset, vals); + pub fn set_bools(&self, offset: usize, vals: [bool; N]) { + self.set_scalars(offset, vals); } } @@ -1194,90 +1177,90 @@ pub trait LeanCtorScalar: Copy { const OFFSET: usize; /// Read one scalar field at `offset`. - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self; + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self; /// Write one scalar field at `offset` (owned ctor only). - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self); } impl LeanCtorScalar for bool { const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_bool(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_bool(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_bool(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_bool(offset, val); } } impl LeanCtorScalar for u8 { const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_u8(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_u8(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_u8(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_u8(offset, val); } } impl LeanCtorScalar for u16 { const OFFSET: usize = 2; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_u16(num_objs, offset) + fn ctor_get(ctor: 
&LeanCtor, offset: usize) -> Self { + ctor.get_u16(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_u16(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_u16(offset, val); } } impl LeanCtorScalar for u32 { const OFFSET: usize = 4; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_u32(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_u32(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_u32(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_u32(offset, val); } } impl LeanCtorScalar for u64 { const OFFSET: usize = 8; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_u64(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_u64(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_u64(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_u64(offset, val); } } impl LeanCtorScalar for f32 { const OFFSET: usize = 4; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_f32(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_f32(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_f32(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_f32(offset, val); } } impl LeanCtorScalar for f64 { const OFFSET: usize = 8; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_f64(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_f64(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_f64(num_objs, offset, val); + fn ctor_set(ctor: 
&LeanCtor, offset: usize, val: Self) { + ctor.set_f64(offset, val); } } impl LeanCtorScalar for usize { // USize fields are indexed by slot, so offset is 1 slot. const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, num_objs: usize, offset: usize) -> Self { - ctor.get_usize(num_objs, offset) + fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { + ctor.get_usize(offset) } - fn ctor_set(ctor: &LeanCtor, num_objs: usize, offset: usize, val: Self) { - ctor.set_usize(num_objs, offset, val); + fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { + ctor.set_usize(offset, val); } } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index df8a652..932acad 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -246,15 +246,17 @@ pub(crate) extern "C" fn rs_roundtrip_scalar_struct( ) -> LeanScalarStruct { let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); - let u64val = ctor.get_u64(1, 0); - let u32val = ctor.get_u32(1, 8); - let u8val = ctor.get_u8(1, 12); + let s = ctor.scalar_base(0); + let u64val = ctor.get_u64(s); + let u32val = ctor.get_u32(s + 8); + let u8val = ctor.get_u8(s + 12); let out = LeanCtor::alloc(0, 1, 13); + let s = out.scalar_base(0); out.set(0, build_nat(&obj_nat)); - out.set_u64(1, 0, u64val); - out.set_u32(1, 8, u32val); - out.set_u8(1, 12, u8val); + out.set_u64(s, u64val); + out.set_u32(s + 8, u32val); + out.set_u8(s + 12, u8val); LeanScalarStruct::new(out.into()) } @@ -373,21 +375,23 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ) -> LeanExtScalarStruct { let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); - let u64val = ctor.get_u64(1, 0); - let fval = ctor.get_f64(1, 8); - let u32val = ctor.get_u32(1, 16); - let f32val = ctor.get_f32(1, 20); - let u16val = ctor.get_u16(1, 24); - let u8val = ctor.get_u8(1, 26); + let s = ctor.scalar_base(0); + let u64val = ctor.get_u64(s); + let fval = ctor.get_f64(s + 8); + let u32val = ctor.get_u32(s + 16); + let f32val = ctor.get_f32(s + 20); + let u16val = 
ctor.get_u16(s + 24); + let u8val = ctor.get_u8(s + 26); let out = LeanCtor::alloc(0, 1, 27); + let s = out.scalar_base(0); out.set(0, build_nat(&obj_nat)); - out.set_u64(1, 0, u64val); - out.set_f64(1, 8, fval); - out.set_u32(1, 16, u32val); - out.set_f32(1, 20, f32val); - out.set_u16(1, 24, u16val); - out.set_u8(1, 26, u8val); + out.set_u64(s, u64val); + out.set_f64(s + 8, fval); + out.set_u32(s + 16, u32val); + out.set_f32(s + 20, f32val); + out.set_u16(s + 24, u16val); + out.set_u8(s + 26, u8val); LeanExtScalarStruct::new(out.into()) } @@ -396,7 +400,7 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( // ============================================================================= /// Round-trip a USizeStruct. -/// Lean layout: 1 obj field, then usize (slot 0), then u8 at scalar offset 0. +/// Lean layout: 1 obj field, then usize (slot 0), then u8. /// Alloc: num_objs=1, scalar_sz=9 (8 for usize slot + 1 for u8). #[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_usize_struct( @@ -404,13 +408,15 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( ) -> LeanUSizeStruct { let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); - let uval = ctor.get_usize(1, 0); - let u8val = ctor.get_u8(2, 0); + let uval = ctor.get_usize(0); + let s = ctor.scalar_base(1); // 1 usize field + let u8val = ctor.get_u8(s); let out = LeanCtor::alloc(0, 1, 9); out.set(0, build_nat(&obj_nat)); - out.set_usize(1, 0, uval); - out.set_u8(2, 0, u8val); + out.set_usize(0, uval); + let s = out.scalar_base(1); + out.set_u8(s, u8val); LeanUSizeStruct::new(out.into()) } @@ -427,11 +433,13 @@ pub(crate) extern "C" fn rs_roundtrip_bool_struct( ) -> LeanBoolStruct { let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); - let [b1, b2, b3] = ctor.scalars::<3, bool>(1, 0); + let s = ctor.scalar_base(0); + let [b1, b2, b3] = ctor.scalars::<3, bool>(s); let out = LeanCtor::alloc(0, 1, 3); + let s = out.scalar_base(0); out.set(0, build_nat(&obj_nat)); - 
out.set_scalars::<3, bool>(1, 0, [b1, b2, b3]); + out.set_scalars::<3, bool>(s, [b1, b2, b3]); LeanBoolStruct::new(out.into()) } @@ -444,11 +452,13 @@ pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( ) -> LeanMultiU32Struct { let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); - let [a, b, c] = ctor.scalars::<3, u32>(1, 0); + let s = ctor.scalar_base(0); + let [a, b, c] = ctor.scalars::<3, u32>(s); let out = LeanCtor::alloc(0, 1, 12); + let s = out.scalar_base(0); out.set(0, build_nat(&obj_nat)); - out.set_scalars::<3, u32>(1, 0, [a, b, c]); + out.set_scalars::<3, u32>(s, [a, b, c]); LeanMultiU32Struct::new(out.into()) } @@ -696,9 +706,10 @@ pub(crate) extern "C" fn rs_owned_prod_multiply(pair: LeanProd) -> Le pub(crate) extern "C" fn rs_owned_scalar_sum(ptr: LeanScalarStruct) -> u64 { // Lean descending-size layout: u64(0), u32(8), u8(12) let ctor = ptr.as_ctor(); - let u64val = ctor.get_u64(1, 0); - let u32val = ctor.get_u32(1, 8) as u64; - let u8val = ctor.get_u8(1, 12) as u64; + let s = ctor.scalar_base(0); + let u64val = ctor.get_u64(s); + let u32val = ctor.get_u32(s + 8) as u64; + let u8val = ctor.get_u8(s + 12) as u64; u64val + u32val + u8val // ptr drops here → lean_dec } From 493b5a1da2f029c5487cb98598d5456e08789501 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 11:17:27 -0400 Subject: [PATCH 03/17] clippy --- src/object.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/object.rs b/src/object.rs index 05562bd..4df43aa 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1003,7 +1003,7 @@ impl LeanCtor { /// ``` #[inline] pub fn scalar_base(&self, num_usize: usize) -> usize { - (self.num_objs() + num_usize) * std::mem::size_of::() + (self.num_objs() + num_usize) * size_of::() } /// All scalar accessors below take `offset` as an absolute byte offset From 460d7986a603552bca9b24b25e37683dbca04447 Mon Sep 17 00:00:00 2001 From: samuelburnham 
<45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 11:32:57 -0400 Subject: [PATCH 04/17] Fixup --- src/object.rs | 6 +++--- src/test_ffi.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/object.rs b/src/object.rs index 4df43aa..6762c3e 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1042,13 +1042,13 @@ impl LeanCtor { /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for /// fixed-size scalars (use [`scalar_base`](Self::scalar_base) to compute /// it), or a slot index for `usize`. - pub fn scalars(&self, offset: usize) -> [T; N] { + pub fn get_scalars(&self, offset: usize) -> [T; N] { std::array::from_fn(|i| T::ctor_get(self, offset + i * T::OFFSET)) } - /// Convenience alias for `scalars::`. + /// Convenience alias for `get_scalars::`. pub fn get_bools(&self, offset: usize) -> [bool; N] { - self.scalars(offset) + self.get_scalars(offset) } } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 932acad..a8249a9 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -434,7 +434,7 @@ pub(crate) extern "C" fn rs_roundtrip_bool_struct( let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); let s = ctor.scalar_base(0); - let [b1, b2, b3] = ctor.scalars::<3, bool>(s); + let [b1, b2, b3] = ctor.get_scalars::<3, bool>(s); let out = LeanCtor::alloc(0, 1, 3); let s = out.scalar_base(0); @@ -453,7 +453,7 @@ pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( let ctor = ptr.as_ctor(); let obj_nat = Nat::from_obj(&ctor.get(0)); let s = ctor.scalar_base(0); - let [a, b, c] = ctor.scalars::<3, u32>(s); + let [a, b, c] = ctor.get_scalars::<3, u32>(s); let out = LeanCtor::alloc(0, 1, 12); let s = out.scalar_base(0); From 8da54955beec1c3565c58467b1f98c8d2a91a18c Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 11:42:14 -0400 Subject: [PATCH 05/17] Add a mixed ctor test --- Tests/FFI.lean | 6 ++++++ Tests/Gen.lean | 14 
++++++++++++++ src/test_ffi.rs | 27 +++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) diff --git a/Tests/FFI.lean b/Tests/FFI.lean index d87e6a8..7ae853c 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -64,6 +64,9 @@ opaque roundtripExtScalarStruct : @& ExtScalarStruct → ExtScalarStruct @[extern "rs_roundtrip_usize_struct"] opaque roundtripUSizeStruct : @& USizeStruct → USizeStruct +@[extern "rs_roundtrip_usize_mixed_struct"] +opaque roundtripUSizeMixedStruct : @& USizeMixedStruct → USizeMixedStruct + @[extern "rs_roundtrip_bool_struct"] opaque roundtripBoolStruct : @& BoolStruct → BoolStruct @@ -365,6 +368,9 @@ def borrowedRoundtripTests : TestSeq := test "ExtScalarStruct max" (show Bool from roundtripExtScalarStruct ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩ == ⟨100, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 1.0, 1.0⟩) ++ test "USizeStruct zeros" (roundtripUSizeStruct ⟨0, 0, 0⟩ == ⟨0, 0, 0⟩) ++ test "USizeStruct mixed" (roundtripUSizeStruct ⟨42, 99, 255⟩ == ⟨42, 99, 255⟩) ++ + -- USizeMixedStruct declaration order: obj, u64val, bval, uval, u32val + test "USizeMixedStruct zeros" (roundtripUSizeMixedStruct ⟨0, 0, false, 0, 0⟩ == ⟨0, 0, false, 0, 0⟩) ++ + test "USizeMixedStruct mixed" (roundtripUSizeMixedStruct ⟨42, 0xFFFFFFFFFFFFFFFF, true, 99, 0xFFFFFFFF⟩ == ⟨42, 0xFFFFFFFFFFFFFFFF, true, 99, 0xFFFFFFFF⟩) ++ test "BoolStruct all false" (roundtripBoolStruct ⟨0, false, false, false⟩ == ⟨0, false, false, false⟩) ++ test "BoolStruct all true" (roundtripBoolStruct ⟨42, true, true, true⟩ == ⟨42, true, true, true⟩) ++ test "BoolStruct mixed" (roundtripBoolStruct ⟨7, true, false, true⟩ == ⟨7, true, false, true⟩) ++ diff --git a/Tests/Gen.lean b/Tests/Gen.lean index e549e8f..6f26372 100644 --- a/Tests/Gen.lean +++ b/Tests/Gen.lean @@ -122,6 +122,20 @@ structure USizeStruct where u8val : UInt8 deriving Repr, BEq, DecidableEq, Inhabited +/-- Structure mixing USize with other scalar types. 
+ Declaration order deliberately differs from memory layout to test + that FFI code handles Lean's field reordering correctly. + Declaration: obj, u64val, bval, uval, u32val + Memory layout: [obj] [uval (usize slot)] [u64val @ +0] [u32val @ +8] [bval @ +12] + Total scalar size = 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21. -/ +structure USizeMixedStruct where + obj : Nat + u64val : UInt64 + bval : Bool + uval : USize + u32val : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + /-- Structure with multiple Bool scalar fields for batch-read/write testing. Layout: 1 object field (obj : Nat), then 3 Bool scalars. -/ structure BoolStruct where diff --git a/src/test_ffi.rs b/src/test_ffi.rs index a8249a9..7570813 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -34,6 +34,8 @@ crate::lean_domain_type! { LeanUSizeStruct; /// Lean `RustData` — opaque external object LeanRustData; + /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool + LeanUSizeMixedStruct; /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool LeanBoolStruct; /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 @@ -420,6 +422,31 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( LeanUSizeStruct::new(out.into()) } +/// Round-trip a USizeMixedStruct. +/// Lean layout: 1 obj field, 1 usize slot, then scalars: u64(+0), u32(+8), bool(+12). +/// Total scalar size: 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21 bytes. 
+#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( + ptr: LeanUSizeMixedStruct>, +) -> LeanUSizeMixedStruct { + let ctor = ptr.as_ctor(); + let obj_nat = Nat::from_obj(&ctor.get(0)); + let uval = ctor.get_usize(0); + let s = ctor.scalar_base(1); // 1 usize field + let u64val = ctor.get_u64(s); + let u32val = ctor.get_u32(s + 8); + let bval = ctor.get_bool(s + 12); + + let out = LeanCtor::alloc(0, 1, 21); + out.set(0, build_nat(&obj_nat)); + out.set_usize(0, uval); + let s = out.scalar_base(1); + out.set_u64(s, u64val); + out.set_u32(s + 8, u32val); + out.set_bool(s + 12, bval); + LeanUSizeMixedStruct::new(out.into()) +} + // ============================================================================= // BoolStruct / MultiU32Struct roundtrips (batch scalar access) // ============================================================================= From fc6321d98209ecc095b38958adfa7ce94126a355 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 14:29:38 -0400 Subject: [PATCH 06/17] Address review --- Tests/FFI.lean | 1 + src/object.rs | 123 +++++++++++++++++++++++++++++++++--------------- src/test_ffi.rs | 17 ++++++- 3 files changed, 102 insertions(+), 39 deletions(-) diff --git a/Tests/FFI.lean b/Tests/FFI.lean index 7ae853c..bb9835e 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -70,6 +70,7 @@ opaque roundtripUSizeMixedStruct : @& USizeMixedStruct → USizeMixedStruct @[extern "rs_roundtrip_bool_struct"] opaque roundtripBoolStruct : @& BoolStruct → BoolStruct + @[extern "rs_roundtrip_multi_u32_struct"] opaque roundtripMultiU32Struct : @& MultiU32Struct → MultiU32Struct diff --git a/src/object.rs b/src/object.rs index 6762c3e..41dd78f 100644 --- a/src/object.rs +++ b/src/object.rs @@ -903,6 +903,25 @@ impl From> for LeanOwned { /// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). 
/// +/// # Overall structure +/// +/// In C, a constructor is: +/// ```c +/// typedef struct { +/// lean_object m_header; // 8 bytes: tag in m_tag, obj count in m_other +/// lean_object * m_objs[]; // flexible array: data area starts here +/// } lean_ctor_object; +/// ``` +/// +/// `m_objs` is a C flexible array member — it has no compile-time size. +/// `lean_alloc_ctor(tag, num_objs, scalar_sz)` over-allocates so that the +/// region starting at `m_objs` is large enough for `num_objs` pointer-width +/// entries **plus** `scalar_sz` additional bytes for scalar fields. Despite +/// the `lean_object *` element type, the trailing bytes are not pointers — +/// the C API accesses them by casting to `uint8_t*` and indexing by byte +/// offset. `lean_ctor_obj_cptr` returns `&m_objs[0]`; all offsets in the +/// accessor API are relative to this address. +/// /// # Constructor field kinds /// /// Lean constructor fields are either: @@ -913,37 +932,67 @@ impl From> for LeanOwned { /// - **Scalar fields**: values stored inline as their unboxed C types /// (`uint8_t`, `uint32_t`, `double`, etc.) rather than as `lean_object*`. /// For example, `Bool` is stored as an unboxed `uint8_t` when it appears -/// directly in a constructor, but as a boxed `lean_object*` in polymorphic -/// contexts like `Array Bool`. +/// directly in a `lean_ctor_object`, but +/// as a boxed `lean_object*` in polymorphic contexts like `Array Bool`. /// /// # Data area layout /// -/// After the 8-byte header, a constructor's data area begins with a contiguous -/// array of pointer-width entries accessed via `lean_ctor_obj_cptr`. A single -/// pointer-width entry in this array is called a **slot**. The full layout is: +/// **Important:** Lean reorders fields by kind and size, so the memory layout +/// may differ from the declaration order in Lean source. 
The data area +/// (accessed via `lean_ctor_obj_cptr`) is laid out as three regions, following +/// the terminology from the +/// [Lean reference manual](https://lean-lang.org/doc/reference/latest/): /// -/// 1. Object field slots (one `lean_object*` per field) -/// 2. `USize` slots (one `usize` per field) -/// 3. Fixed-size scalar bytes, sorted by descending size +/// 1. **Object fields** — pointer-width `lean_object*` entries, one per field +/// ("fields of the first kind") +/// 2. **`USize` fields** — pointer-width `usize` entries, one per field +/// ("fields of the second kind"); together with object fields these form +/// a contiguous array of pointer-width entries +/// 3. **Fixed-size scalar fields** — laid out in descending size order /// (u64/f64, u32/f32, u16, u8/bool) /// +/// For example, a Lean structure declared as: +/// ```text +/// structure Foo where +/// name : String -- object field +/// flag : Bool -- scalar (u8) +/// count : UInt64 -- scalar (u64) +/// size : USize -- usize field +/// ``` +/// is reordered and laid out as (assuming 64-bit pointers): +/// ```text +/// lean_ctor_obj_cptr ──> +/// byte 0..7: name (lean_object*) ← m_objs[0], object field +/// byte 8..15: size (usize) ← m_objs[1], USize field +/// byte 16..23: count (u64) ← scalar field +/// byte 24: flag (u8) ← scalar field +/// ``` +/// `lean_alloc_ctor(tag, num_objs=1, scalar_sz=17)` allocates enough space +/// for the header, 1 object pointer, and 17 scalar bytes (8 for the USize +/// entry + 8 for the u64 + 1 for the u8). The scalar fields live past the +/// end of the pointer-width entries; the C API accesses them by casting +/// `lean_ctor_obj_cptr` to `uint8_t*` and indexing by byte offset. +/// /// # Offset conventions /// /// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), `offset` is /// an **absolute byte offset** from `lean_ctor_obj_cptr` — the same convention /// as the Lean C API functions `lean_ctor_get_uint8`, `lean_ctor_set_uint32`, /// etc.
Use [`scalar_base`](Self::scalar_base) to compute where the scalar -/// area starts: +/// fields start. Reading `Foo` from the example above: /// /// ```ignore -/// let base = ctor.scalar_base(0); // 0 USize fields; obj count from header -/// let x = ctor.get_u64(base + 0); -/// let y = ctor.get_u32(base + 8); +/// let name = ctor.get(0); // object field +/// let size = ctor.get_usize(0); // first USize field +/// let base = ctor.scalar_base(1); // 1 USize field +/// let count = ctor.get_u64(base + 0); // first scalar +/// let flag = ctor.get_bool(base + 8); // next scalar /// ``` /// -/// For `usize`, the accessor takes a 0-based **slot index**. `USize` slots sit -/// right after object field slots; the header-derived object count is added -/// internally, so `get_usize(0)` reads the first `USize` field. +/// For `usize`, the accessor indexes into the pointer-width entry array +/// described above. The index is relative to the first `USize` entry; +/// the object field count is read from the header and added internally, so +/// `get_usize(0)` reads the first `USize` field. #[repr(transparent)] pub struct LeanCtor(R); @@ -1027,12 +1076,15 @@ impl LeanCtor { pub fn get_f32(&self, offset: usize) -> f32 { unsafe { include::lean_ctor_get_float32(self.0.as_raw(), to_u32(offset)) } } - /// Read a `usize` at the given slot index (0-based, relative to the first - /// `USize` slot after object fields). The object field count is read from - /// the header and added internally. - pub fn get_usize(&self, slot: usize) -> usize { - unsafe { include::lean_ctor_get_usize(self.0.as_raw(), to_u32(self.num_objs() + slot)) } + /// Read a `USize` field. `USize` fields occupy pointer-width entries in + /// the data area right after object fields. `index` is relative to + /// the first `USize` entry; the object field count is read from the header + /// and added internally. 
+ pub fn get_usize(&self, index: usize) -> usize { + unsafe { include::lean_ctor_get_usize(self.0.as_raw(), to_u32(self.num_objs() + index)) } } + /// Read a single `Bool` scalar field (`uint8_t`). + /// Returns `true` if the byte is non-zero. pub fn get_bool(&self, offset: usize) -> bool { self.get_u8(offset) != 0 } @@ -1041,15 +1093,11 @@ impl LeanCtor { /// /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for /// fixed-size scalars (use [`scalar_base`](Self::scalar_base) to compute - /// it), or a slot index for `usize`. + /// it), or an index into the pointer-width `USize` entries for + /// `usize`. pub fn get_scalars(&self, offset: usize) -> [T; N] { std::array::from_fn(|i| T::ctor_get(self, offset + i * T::OFFSET)) } - - /// Convenience alias for `get_scalars::`. - pub fn get_bools(&self, offset: usize) -> [bool; N] { - self.get_scalars(offset) - } } impl LeanCtor { @@ -1125,14 +1173,16 @@ impl LeanCtor { include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); } } - /// Set a `usize` at the given slot index (0-based, relative to the first - /// `USize` slot after object fields). The object field count is read from - /// the header and added internally. - pub fn set_usize(&self, slot: usize, val: usize) { + /// Set a `USize` field. `USize` fields occupy pointer-width entries in + /// the data area right after object fields. `index` is relative to + /// the first `USize` entry; the object field count is read from the header + /// and added internally. + pub fn set_usize(&self, index: usize, val: usize) { unsafe { - include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + slot), val); + include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); } } + /// Write a single `Bool` scalar field (`uint8_t`, 0 or 1). 
pub fn set_bool(&self, offset: usize, val: bool) { self.set_u8(offset, val as u8); } @@ -1141,17 +1191,13 @@ impl LeanCtor { /// /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for /// fixed-size scalars (use [`scalar_base`](LeanCtor::scalar_base) to - /// compute it), or a slot index for `usize`. + /// compute it), or a `USize` field index for `usize`. pub fn set_scalars(&self, offset: usize, vals: [T; N]) { for (i, val) in vals.into_iter().enumerate() { T::ctor_set(self, offset + i * T::OFFSET, val); } } - /// Convenience alias for `set_scalars::`. - pub fn set_bools(&self, offset: usize, vals: [bool; N]) { - self.set_scalars(offset, vals); - } } impl From> for LeanOwned { @@ -1173,7 +1219,8 @@ impl From> for LeanOwned { /// See [`LeanCtor`] for the full data area layout and offset conventions. pub trait LeanCtorScalar: Copy { /// Distance between consecutive fields of this type: - /// `size_of::()` bytes for fixed-size scalars, or 1 slot for `usize`. + /// `size_of::()` bytes for fixed-size scalars, or 1 for `usize` + /// (indexed by pointer-width entry). const OFFSET: usize; /// Read one scalar field at `offset`. @@ -1254,7 +1301,7 @@ impl LeanCtorScalar for f64 { } impl LeanCtorScalar for usize { - // USize fields are indexed by slot, so offset is 1 slot. + // USize fields are pointer-width entries accessed by index, so offset is 1. 
const OFFSET: usize = 1; fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { ctor.get_usize(offset) diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 7570813..15c7487 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -247,6 +247,7 @@ pub(crate) extern "C" fn rs_roundtrip_scalar_struct( ptr: LeanScalarStruct>, ) -> LeanScalarStruct { let ctor = ptr.as_ctor(); + assert!(ctor.num_objs() == 1, "ScalarStruct should have 1 obj field"); let obj_nat = Nat::from_obj(&ctor.get(0)); let s = ctor.scalar_base(0); let u64val = ctor.get_u64(s); @@ -254,6 +255,10 @@ pub(crate) extern "C" fn rs_roundtrip_scalar_struct( let u8val = ctor.get_u8(s + 12); let out = LeanCtor::alloc(0, 1, 13); + assert!( + out.num_objs() == 1, + "alloc'd ScalarStruct should have 1 obj field" + ); let s = out.scalar_base(0); out.set(0, build_nat(&obj_nat)); out.set_u64(s, u64val); @@ -376,8 +381,13 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ptr: LeanExtScalarStruct>, ) -> LeanExtScalarStruct { let ctor = ptr.as_ctor(); + assert!( + ctor.num_objs() == 1, + "ExtScalarStruct should have 1 obj field" + ); let obj_nat = Nat::from_obj(&ctor.get(0)); let s = ctor.scalar_base(0); + assert!(s == 8, "scalar_base(0) with 1 obj should be 8"); let u64val = ctor.get_u64(s); let fval = ctor.get_f64(s + 8); let u32val = ctor.get_u32(s + 16); @@ -430,6 +440,11 @@ pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( ptr: LeanUSizeMixedStruct>, ) -> LeanUSizeMixedStruct { let ctor = ptr.as_ctor(); + // num_objs() should return 1 (only the Nat obj field), not counting USize + assert!( + ctor.num_objs() == 1, + "USizeMixedStruct should have 1 obj field" + ); let obj_nat = Nat::from_obj(&ctor.get(0)); let uval = ctor.get_usize(0); let s = ctor.scalar_base(1); // 1 usize field @@ -451,7 +466,7 @@ pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( // BoolStruct / MultiU32Struct roundtrips (batch scalar access) // ============================================================================= 
-/// Round-trip a BoolStruct using `scalars` / `set_scalars`. +/// Round-trip a BoolStruct using `get_scalars` / `set_scalars` with bool. /// Lean layout: 1 obj field, then 3 Bool scalars at offsets 0, 1, 2. /// Total scalar size: 3 bytes. #[unsafe(no_mangle)] From e931b7a9108a02208776d07312ff66a4f56cfdf9 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 14:33:52 -0400 Subject: [PATCH 07/17] Fmt --- src/object.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/object.rs b/src/object.rs index 41dd78f..d6ef457 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1197,7 +1197,6 @@ impl LeanCtor { T::ctor_set(self, offset + i * T::OFFSET, val); } } - } impl From> for LeanOwned { From 218fc08c9e8a42f265fe1f067efef2df9ff2cfbc Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 17:39:29 -0400 Subject: [PATCH 08/17] Refactor to use get/set ctor trait helpers --- src/lib.rs | 6 + src/object.rs | 306 +++++++++++++++++++++--------------------------- src/test_ffi.rs | 268 ++++++++++++++++++++++-------------------- 3 files changed, 280 insertions(+), 300 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index bfc274c..eca14e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,6 +61,7 @@ pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_obje /// /// Domain types should be prefixed with `Lean` to distinguish them from Lean-side types /// and to match the built-in types (`LeanArray`, `LeanString`, `LeanNat`, etc.). +/// /// For example, a Lean `Point` structure becomes `LeanPoint` in Rust: /// /// ```ignore @@ -69,6 +70,11 @@ pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_obje /// LeanPoint; /// } /// ``` +/// +/// For structures with scalar fields, implement [`LeanCtorScalar`] on +/// the generated type to get type-indexed `get_*`/`set_*` accessors. 
+/// +/// [`LeanCtorScalar`]: object::LeanCtorScalar #[macro_export] macro_rules! lean_domain_type { ($($(#[$meta:meta])* $name:ident;)*) => {$( diff --git a/src/object.rs b/src/object.rs index d6ef457..28519dd 100644 --- a/src/object.rs +++ b/src/object.rs @@ -975,24 +975,35 @@ impl From> for LeanOwned { /// /// # Offset conventions /// -/// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), `offset` is -/// an **absolute byte offset** from `lean_ctor_obj_cptr` — the same convention -/// as the Lean C API functions `lean_ctor_get_uint8`, `lean_ctor_set_uint32`, -/// etc. Use [`scalar_base`](Self::scalar_base) to compute where the scalar -/// fields start. Reading `Foo` from the example above: -/// -/// ```ignore -/// let name = ctor.get(0); // object field -/// let size = ctor.get_usize(0); // first USize field -/// let base = ctor.scalar_base(1); // 1 USize field -/// let count = ctor.get_u64(base + 0); // first scalar -/// let flag = ctor.get_bool(base + 8); // next scalar -/// ``` +/// For fixed-size scalar types (`u8`–`u64`, `f32`, `f64`, `bool`), the +/// `LeanCtor` methods take `offset` as an **absolute byte offset** from +/// `lean_ctor_obj_cptr` — the same convention as the Lean C API functions +/// `lean_ctor_get_uint8`, `lean_ctor_set_uint32`, etc. /// /// For `usize`, the accessor indexes into the pointer-width entry array /// described above. The index is relative to the first `USize` entry; /// the object field count is read from the header and added internally, so /// `get_usize(0)` reads the first `USize` field. +/// +/// The [`LeanCtorScalar`] trait computes these byte offsets automatically +/// from declared field counts, so users don't need to calculate them +/// manually. 
Reading all fields of `Foo` in C and with the trait: +/// +/// ```c +/// // C — all offsets are from lean_ctor_obj_cptr +/// lean_object* name = lean_ctor_get(o, 0); // array index 0 +/// size_t size = lean_ctor_get_usize(o, 1); // array index 1 (num_objs + 0) +/// uint64_t count = lean_ctor_get_uint64(o, 16); // byte offset (1 obj + 1 usize) * 8 +/// uint8_t flag = lean_ctor_get_uint8(o, 24); // byte offset 16 + sizeof(uint64_t) +/// ``` +/// +/// ```ignore +/// // Rust — using LeanCtorScalar trait (no manual offsets) +/// let name = foo.as_ctor().get(0); // object field +/// let size = foo.get_usize(0); // first USize field +/// let count = foo.get_u64(0); // first u64 scalar +/// let flag = foo.get_bool(0); // first bool scalar +/// ``` #[repr(transparent)] pub struct LeanCtor(R); @@ -1040,21 +1051,6 @@ impl LeanCtor { unsafe { include::lean_ctor_num_objs(self.0.as_raw()) as usize } } - /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields - /// begin. Pass `num_usize` = number of `USize` fields in this constructor - /// (0 for most types). The object field count is read from the header. - /// - /// Use the returned value as a base when calling scalar accessors: - /// ```ignore - /// let base = ctor.scalar_base(0); - /// let x = ctor.get_u64(base + 0); - /// let y = ctor.get_u32(base + 8); - /// ``` - #[inline] - pub fn scalar_base(&self, num_usize: usize) -> usize { - (self.num_objs() + num_usize) * size_of::() - } - /// All scalar accessors below take `offset` as an absolute byte offset /// from `lean_ctor_obj_cptr`, matching the Lean C API convention for /// `lean_ctor_get_uint8`, `lean_ctor_get_uint32`, etc. @@ -1089,15 +1085,45 @@ impl LeanCtor { self.get_u8(offset) != 0 } - /// Read `N` consecutive scalar fields of type `T` starting at `offset`. 
- /// - /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for - /// fixed-size scalars (use [`scalar_base`](Self::scalar_base) to compute - /// it), or an index into the pointer-width `USize` entries for - /// `usize`. - pub fn get_scalars(&self, offset: usize) -> [T; N] { - std::array::from_fn(|i| T::ctor_get(self, offset + i * T::OFFSET)) + + // ------------------------------------------------------------------------- + // Scalar field setters + // ------------------------------------------------------------------------- + // + // All setters take `offset` as an absolute byte offset from + // `lean_ctor_obj_cptr`, matching the Lean C API convention. + // Available on all R: LeanRef (not restricted to LeanOwned). + + pub fn set_u8(&self, offset: usize, val: u8) { + unsafe { include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); } + } + pub fn set_u16(&self, offset: usize, val: u16) { + unsafe { include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); } + } + pub fn set_u32(&self, offset: usize, val: u32) { + unsafe { include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); } + } + pub fn set_u64(&self, offset: usize, val: u64) { + unsafe { include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); } } + pub fn set_f64(&self, offset: usize, val: f64) { + unsafe { include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); } + } + pub fn set_f32(&self, offset: usize, val: f32) { + unsafe { include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); } + } + /// Set a `USize` field. `USize` fields occupy pointer-width entries in + /// the data area right after object fields. `index` is relative to + /// the first `USize` entry; the object field count is read from the header + /// and added internally. 
+ pub fn set_usize(&self, index: usize, val: usize) { + unsafe { include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); } + } + /// Write a single `Bool` scalar field (`uint8_t`, 0 or 1). + pub fn set_bool(&self, offset: usize, val: bool) { + self.set_u8(offset, val as u8); + } + } impl LeanCtor { @@ -1135,68 +1161,6 @@ impl LeanCtor { std::mem::forget(self); ptr } - - // ------------------------------------------------------------------------- - // Scalar field setters (owned only — mutation) - // ------------------------------------------------------------------------- - // - // All setters take `offset` as an absolute byte offset from - // `lean_ctor_obj_cptr`, matching the Lean C API convention. - - pub fn set_u8(&self, offset: usize, val: u8) { - unsafe { - include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); - } - } - pub fn set_u16(&self, offset: usize, val: u16) { - unsafe { - include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); - } - } - pub fn set_u32(&self, offset: usize, val: u32) { - unsafe { - include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); - } - } - pub fn set_u64(&self, offset: usize, val: u64) { - unsafe { - include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); - } - } - pub fn set_f64(&self, offset: usize, val: f64) { - unsafe { - include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); - } - } - pub fn set_f32(&self, offset: usize, val: f32) { - unsafe { - include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); - } - } - /// Set a `USize` field. `USize` fields occupy pointer-width entries in - /// the data area right after object fields. `index` is relative to - /// the first `USize` entry; the object field count is read from the header - /// and added internally. 
- pub fn set_usize(&self, index: usize, val: usize) { - unsafe { - include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); - } - } - /// Write a single `Bool` scalar field (`uint8_t`, 0 or 1). - pub fn set_bool(&self, offset: usize, val: bool) { - self.set_u8(offset, val as u8); - } - - /// Write `N` consecutive scalar fields of type `T` starting at `offset`. - /// - /// `offset` is an absolute byte offset from `lean_ctor_obj_cptr` for - /// fixed-size scalars (use [`scalar_base`](LeanCtor::scalar_base) to - /// compute it), or a `USize` field index for `usize`. - pub fn set_scalars(&self, offset: usize, vals: [T; N]) { - for (i, val) in vals.into_iter().enumerate() { - T::ctor_set(self, offset + i * T::OFFSET, val); - } - } } impl From> for LeanOwned { @@ -1209,104 +1173,96 @@ impl From> for LeanOwned { } } -// ----------------------------------------------------------------------------- -// LeanCtorScalar — trait for batch scalar field access -// ----------------------------------------------------------------------------- +// ============================================================================= +// LeanCtorScalar — trait for type-indexed scalar field access +// ============================================================================= -/// Trait for scalar types that can be batch-read/written from a [`LeanCtor`]. +/// Trait for type-indexed scalar field access on domain types. /// -/// See [`LeanCtor`] for the full data area layout and offset conventions. -pub trait LeanCtorScalar: Copy { - /// Distance between consecutive fields of this type: - /// `size_of::()` bytes for fixed-size scalars, or 1 for `usize` - /// (indexed by pointer-width entry). - const OFFSET: usize; - - /// Read one scalar field at `offset`. - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self; +/// Implement this on a `lean_domain_type!` wrapper to get `get_*`/`set_*` +/// methods that index by type rather than byte offset. 
Set the associated +/// constants to match your Lean structure's field counts: +/// +/// ```ignore +/// lean_domain_type! { LeanFoo; } +/// +/// impl LeanCtorScalar for LeanFoo { +/// const NUM_USIZE: usize = 1; +/// const NUM_8B: usize = 1; +/// fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +/// } +/// +/// let count = foo.get_64(0); +/// let flag = foo.get_8(0) != 0; +/// ``` +/// +/// Within each size tier, Lean preserves **declaration order** — it does not +/// sub-sort by type. Each tier has one getter/setter pair that returns the +/// natural integer type for that width. Use `f64::from_bits()` / +/// `f64::to_bits()` for float fields, and `val != 0` for bool fields. +pub trait LeanCtorScalar { + /// Number of `USize` fields (pointer-width, after object fields). + const NUM_USIZE: usize = 0; + /// Number of 8-byte scalar fields (`UInt64` + `Float`). + const NUM_8B: usize = 0; + /// Number of 4-byte scalar fields (`UInt32` + `Float32`). + const NUM_4B: usize = 0; + /// Number of 2-byte scalar fields (`UInt16`). + const NUM_2B: usize = 0; + // 1-byte count not needed — it's the last tier. - /// Write one scalar field at `offset` (owned ctor only). - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self); -} + /// Access the underlying constructor. Already generated by `lean_domain_type!`. + fn as_ctor(&self) -> LeanCtor>; -impl LeanCtorScalar for bool { - const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_bool(offset) - } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_bool(offset, val); + /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin. + /// Reads the object field count from the header and adds `NUM_USIZE`.
+ fn scalar_base(&self) -> usize { + (self.as_ctor().num_objs() + Self::NUM_USIZE) * size_of::() } -} -impl LeanCtorScalar for u8 { - const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_u8(offset) - } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_u8(offset, val); - } -} + // -- USize fields -- -impl LeanCtorScalar for u16 { - const OFFSET: usize = 2; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_u16(offset) + fn get_usize(&self, i: usize) -> usize { + self.as_ctor().get_usize(i) } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_u16(offset, val); + fn set_usize(&self, i: usize, val: usize) { + self.as_ctor().set_usize(i, val) } -} -impl LeanCtorScalar for u32 { - const OFFSET: usize = 4; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_u32(offset) - } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_u32(offset, val); - } -} + // -- 8-byte tier (UInt64 / Float) -- -impl LeanCtorScalar for u64 { - const OFFSET: usize = 8; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_u64(offset) + fn get_64(&self, i: usize) -> u64 { + self.as_ctor().get_u64(self.scalar_base() + i * 8) } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_u64(offset, val); + fn set_64(&self, i: usize, val: u64) { + self.as_ctor().set_u64(self.scalar_base() + i * 8, val) } -} -impl LeanCtorScalar for f32 { - const OFFSET: usize = 4; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_f32(offset) + // -- 4-byte tier (UInt32 / Float32) -- + + fn get_32(&self, i: usize) -> u32 { + self.as_ctor().get_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4) } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_f32(offset, val); + fn set_32(&self, i: usize, val: u32) { + self.as_ctor().set_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4, val) } -} -impl LeanCtorScalar for f64 { - const OFFSET: usize 
= 8; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_f64(offset) + // -- 2-byte tier (UInt16) -- + + fn get_16(&self, i: usize) -> u16 { + self.as_ctor().get_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2) } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_f64(offset, val); + fn set_16(&self, i: usize, val: u16) { + self.as_ctor().set_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2, val) } -} -impl LeanCtorScalar for usize { - // USize fields are pointer-width entries accessed by index, so offset is 1. - const OFFSET: usize = 1; - fn ctor_get(ctor: &LeanCtor, offset: usize) -> Self { - ctor.get_usize(offset) + // -- 1-byte tier (UInt8 / Bool) -- + + fn get_8(&self, i: usize) -> u8 { + self.as_ctor().get_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i) } - fn ctor_set(ctor: &LeanCtor, offset: usize, val: Self) { - ctor.set_usize(offset, val); + fn set_8(&self, i: usize, val: u8) { + self.as_ctor().set_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i, val) } } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 15c7487..47fa223 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -12,9 +12,9 @@ use std::sync::LazyLock; use crate::include; use crate::nat::Nat; use crate::object::{ - ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, LeanExcept, - LeanExternal, LeanIOResult, LeanList, LeanNat, LeanOption, LeanOwned, LeanProd, LeanRef, - LeanString, + ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, + LeanCtorScalar, LeanExcept, LeanExternal, LeanIOResult, LeanList, LeanNat, LeanOption, + LeanOwned, LeanProd, LeanRef, LeanString, }; // ============================================================================= @@ -36,12 +36,56 @@ crate::lean_domain_type! 
{ LeanRustData; /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool LeanUSizeMixedStruct; +} + +// Test LeanCtorScalar trait +crate::lean_domain_type! { /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool LeanBoolStruct; /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 LeanMultiU32Struct; } +// ScalarStruct: 1 obj, scalars: u64, u32, u8 +impl LeanCtorScalar for LeanScalarStruct { + const NUM_8B: usize = 1; + const NUM_4B: usize = 1; + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + +// ExtScalarStruct: 1 obj, scalars: u64, f64, u32, f32, u16, u8 +impl LeanCtorScalar for LeanExtScalarStruct { + const NUM_8B: usize = 2; // u64 + f64 + const NUM_4B: usize = 2; // u32 + f32 + const NUM_2B: usize = 1; + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + +// USizeStruct: 1 obj, 1 usize, u8 +impl LeanCtorScalar for LeanUSizeStruct { + const NUM_USIZE: usize = 1; + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + +// USizeMixedStruct: 1 obj, 1 usize, scalars: u64, u32, bool +impl LeanCtorScalar for LeanUSizeMixedStruct { + const NUM_USIZE: usize = 1; + const NUM_8B: usize = 1; + const NUM_4B: usize = 1; + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + +// BoolStruct: 1 obj, 3 bools +impl LeanCtorScalar for LeanBoolStruct { + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + +// MultiU32Struct: 1 obj, 3 u32s +impl LeanCtorScalar for LeanMultiU32Struct { + const NUM_4B: usize = 3; + fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } +} + /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). fn build_nat(n: &Nat) -> LeanOwned { n.to_lean().into() @@ -239,32 +283,25 @@ pub(crate) extern "C" fn rs_io_result_error_string( // LeanCtor scalar fields // ============================================================================= -/// Round-trip a ScalarStruct. 
-/// Lean layout: 1 obj field, then scalars by descending size: u64(0), u32(8), u8(12). +/// Round-trip a ScalarStruct using LeanCtorScalar trait. +/// Lean layout: 1 obj field, then scalars by descending size: u64, u32, u8. /// Total scalar size: 8 + 4 + 1 = 13 bytes. #[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_scalar_struct( ptr: LeanScalarStruct>, ) -> LeanScalarStruct { - let ctor = ptr.as_ctor(); - assert!(ctor.num_objs() == 1, "ScalarStruct should have 1 obj field"); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let s = ctor.scalar_base(0); - let u64val = ctor.get_u64(s); - let u32val = ctor.get_u32(s + 8); - let u8val = ctor.get_u8(s + 12); - - let out = LeanCtor::alloc(0, 1, 13); - assert!( - out.num_objs() == 1, - "alloc'd ScalarStruct should have 1 obj field" - ); - let s = out.scalar_base(0); - out.set(0, build_nat(&obj_nat)); - out.set_u64(s, u64val); - out.set_u32(s + 8, u32val); - out.set_u8(s + 12, u8val); - LeanScalarStruct::new(out.into()) + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let u64val = ptr.get_64(0); + let u32val = ptr.get_32(0); + let u8val = ptr.get_8(0); + + let ctor = LeanCtor::alloc(0, 1, 13); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanScalarStruct::new(ctor.into()); + out.set_64(0, u64val); + out.set_32(0, u32val); + out.set_8(0, u8val); + out } // ============================================================================= @@ -373,135 +410,125 @@ pub(crate) extern "C" fn rs_external_get_label( // Extended scalar struct roundtrip (u8, u16, u32, u64, f64, f32) // ============================================================================= -/// Round-trip an ExtScalarStruct. -/// Lean layout: 1 obj, then descending size: u64(0), f64(8), u32(16), f32(20), u16(24), u8(26). +/// Round-trip an ExtScalarStruct using LeanCtorScalar trait. +/// Lean layout: 1 obj, then descending size: u64, f64, u32, f32, u16, u8. /// Total scalar: 27 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ptr: LeanExtScalarStruct>, ) -> LeanExtScalarStruct { - let ctor = ptr.as_ctor(); - assert!( - ctor.num_objs() == 1, - "ExtScalarStruct should have 1 obj field" - ); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let s = ctor.scalar_base(0); - assert!(s == 8, "scalar_base(0) with 1 obj should be 8"); - let u64val = ctor.get_u64(s); - let fval = ctor.get_f64(s + 8); - let u32val = ctor.get_u32(s + 16); - let f32val = ctor.get_f32(s + 20); - let u16val = ctor.get_u16(s + 24); - let u8val = ctor.get_u8(s + 26); - - let out = LeanCtor::alloc(0, 1, 27); - let s = out.scalar_base(0); - out.set(0, build_nat(&obj_nat)); - out.set_u64(s, u64val); - out.set_f64(s + 8, fval); - out.set_u32(s + 16, u32val); - out.set_f32(s + 20, f32val); - out.set_u16(s + 24, u16val); - out.set_u8(s + 26, u8val); - LeanExtScalarStruct::new(out.into()) + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + // 8-byte tier: u64 at index 0, f64 at index 1 + let u64val = ptr.get_64(0); + let fval = f64::from_bits(ptr.get_64(1)); + // 4-byte tier: u32 at index 0, f32 at index 1 + let u32val = ptr.get_32(0); + let f32val = f32::from_bits(ptr.get_32(1)); + let u16val = ptr.get_16(0); + let u8val = ptr.get_8(0); + + let ctor = LeanCtor::alloc(0, 1, 27); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanExtScalarStruct::new(ctor.into()); + out.set_64(0, u64val); + out.set_64(1, fval.to_bits()); + out.set_32(0, u32val); + out.set_32(1, f32val.to_bits()); + out.set_16(0, u16val); + out.set_8(0, u8val); + out } // ============================================================================= // USize struct roundtrip // ============================================================================= -/// Round-trip a USizeStruct. -/// Lean layout: 1 obj field, then usize (slot 0), then u8. -/// Alloc: num_objs=1, scalar_sz=9 (8 for usize slot + 1 for u8). +/// Round-trip a USizeStruct using LeanCtorScalar trait. 
+/// Lean layout: 1 obj field, 1 usize field, then u8. +/// Alloc: num_objs=1, scalar_sz=9 (8 for usize + 1 for u8). #[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_usize_struct( ptr: LeanUSizeStruct>, ) -> LeanUSizeStruct { - let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let uval = ctor.get_usize(0); - let s = ctor.scalar_base(1); // 1 usize field - let u8val = ctor.get_u8(s); - - let out = LeanCtor::alloc(0, 1, 9); - out.set(0, build_nat(&obj_nat)); + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let uval = ptr.get_usize(0); + let u8val = ptr.get_8(0); + + let ctor = LeanCtor::alloc(0, 1, 9); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanUSizeStruct::new(ctor.into()); out.set_usize(0, uval); - let s = out.scalar_base(1); - out.set_u8(s, u8val); - LeanUSizeStruct::new(out.into()) + out.set_8(0, u8val); + out } -/// Round-trip a USizeMixedStruct. -/// Lean layout: 1 obj field, 1 usize slot, then scalars: u64(+0), u32(+8), bool(+12). +/// Round-trip a USizeMixedStruct using LeanCtorScalar trait. +/// Lean layout: 1 obj field, 1 usize field, then scalars: u64, u32, bool. /// Total scalar size: 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( ptr: LeanUSizeMixedStruct>, ) -> LeanUSizeMixedStruct { - let ctor = ptr.as_ctor(); - // num_objs() should return 1 (only the Nat obj field), not counting USize - assert!( - ctor.num_objs() == 1, - "USizeMixedStruct should have 1 obj field" - ); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let uval = ctor.get_usize(0); - let s = ctor.scalar_base(1); // 1 usize field - let u64val = ctor.get_u64(s); - let u32val = ctor.get_u32(s + 8); - let bval = ctor.get_bool(s + 12); - - let out = LeanCtor::alloc(0, 1, 21); - out.set(0, build_nat(&obj_nat)); + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let uval = ptr.get_usize(0); + let u64val = ptr.get_64(0); + let u32val = ptr.get_32(0); + let bval = ptr.get_8(0) != 0; + + let ctor = LeanCtor::alloc(0, 1, 21); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanUSizeMixedStruct::new(ctor.into()); out.set_usize(0, uval); - let s = out.scalar_base(1); - out.set_u64(s, u64val); - out.set_u32(s + 8, u32val); - out.set_bool(s + 12, bval); - LeanUSizeMixedStruct::new(out.into()) + out.set_64(0, u64val); + out.set_32(0, u32val); + out.set_8(0, bval as u8); + out } // ============================================================================= // BoolStruct / MultiU32Struct roundtrips (batch scalar access) // ============================================================================= -/// Round-trip a BoolStruct using `get_scalars` / `set_scalars` with bool. -/// Lean layout: 1 obj field, then 3 Bool scalars at offsets 0, 1, 2. +/// Round-trip a BoolStruct using LeanCtorScalar trait. +/// Lean layout: 1 obj field, then 3 Bool scalars. /// Total scalar size: 3 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_bool_struct( ptr: LeanBoolStruct>, ) -> LeanBoolStruct { - let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let s = ctor.scalar_base(0); - let [b1, b2, b3] = ctor.get_scalars::<3, bool>(s); - - let out = LeanCtor::alloc(0, 1, 3); - let s = out.scalar_base(0); - out.set(0, build_nat(&obj_nat)); - out.set_scalars::<3, bool>(s, [b1, b2, b3]); - LeanBoolStruct::new(out.into()) -} - -/// Round-trip a MultiU32Struct using `scalars` / `set_scalars`. -/// Lean layout: 1 obj field, then 3 UInt32 scalars at offsets 0, 4, 8. + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let b1 = ptr.get_8(0); + let b2 = ptr.get_8(1); + let b3 = ptr.get_8(2); + + let ctor = LeanCtor::alloc(0, 1, 3); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanBoolStruct::new(ctor.into()); + out.set_8(0, b1); + out.set_8(1, b2); + out.set_8(2, b3); + out +} + +/// Round-trip a MultiU32Struct using LeanCtorScalar trait. +/// Lean layout: 1 obj field, then 3 UInt32 scalars. /// Total scalar size: 12 bytes. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( ptr: LeanMultiU32Struct>, ) -> LeanMultiU32Struct { - let ctor = ptr.as_ctor(); - let obj_nat = Nat::from_obj(&ctor.get(0)); - let s = ctor.scalar_base(0); - let [a, b, c] = ctor.get_scalars::<3, u32>(s); + let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); + let a = ptr.get_32(0); + let b = ptr.get_32(1); + let c = ptr.get_32(2); - let out = LeanCtor::alloc(0, 1, 12); - let s = out.scalar_base(0); - out.set(0, build_nat(&obj_nat)); - out.set_scalars::<3, u32>(s, [a, b, c]); - LeanMultiU32Struct::new(out.into()) + let ctor = LeanCtor::alloc(0, 1, 12); + ctor.set(0, build_nat(&obj_nat)); + let out = LeanMultiU32Struct::new(ctor.into()); + out.set_32(0, a); + out.set_32(1, b); + out.set_32(2, c); + out } // ============================================================================= @@ -737,21 +764,12 @@ pub(crate) extern "C" fn rs_owned_prod_multiply(pair: LeanProd) -> Le Nat(fst.0 * snd.0).to_lean() } -/// Owned ScalarStruct: sum all scalar fields. -/// ScalarStruct { obj : Nat, u8val : UInt8, u32val : UInt32, u64val : UInt64 } -/// Lean reorders scalar fields by descending size: -/// u64val at scalar offset 0, u32val at offset 8, u8val at offset 12 -/// Note: roundtrip tests use declaration-order offsets (0, 1, 5) which happen -/// to roundtrip correctly because both read and write use the same offsets. -/// But for computing actual values, we must use the real layout. +/// Owned ScalarStruct: sum all scalar fields using LeanCtorScalar. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_owned_scalar_sum(ptr: LeanScalarStruct) -> u64 { - // Lean descending-size layout: u64(0), u32(8), u8(12) - let ctor = ptr.as_ctor(); - let s = ctor.scalar_base(0); - let u64val = ctor.get_u64(s); - let u32val = ctor.get_u32(s + 8) as u64; - let u8val = ctor.get_u8(s + 12) as u64; + let u64val = ptr.get_64(0); + let u32val = ptr.get_32(0) as u64; + let u8val = ptr.get_8(0) as u64; u64val + u32val + u8val // ptr drops here → lean_dec } From 9e7610657d7cc933618d41001f4da510ea6beeb3 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 17:44:09 -0400 Subject: [PATCH 09/17] Fmt --- src/object.rs | 52 +++++++++++++++++++++++++++++++++++-------------- src/test_ffi.rs | 30 +++++++++++++++++++--------- 2 files changed, 58 insertions(+), 24 deletions(-) diff --git a/src/object.rs b/src/object.rs index 28519dd..ce4fc74 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1085,7 +1085,6 @@ impl LeanCtor { self.get_u8(offset) != 0 } - // ------------------------------------------------------------------------- // Scalar field setters // ------------------------------------------------------------------------- @@ -1095,35 +1094,48 @@ impl LeanCtor { // Available on all R: LeanRef (not restricted to LeanOwned). 
pub fn set_u8(&self, offset: usize, val: u8) { - unsafe { include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_uint8(self.0.as_raw(), to_u32(offset), val); + } } pub fn set_u16(&self, offset: usize, val: u16) { - unsafe { include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_uint16(self.0.as_raw(), to_u32(offset), val); + } } pub fn set_u32(&self, offset: usize, val: u32) { - unsafe { include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_uint32(self.0.as_raw(), to_u32(offset), val); + } } pub fn set_u64(&self, offset: usize, val: u64) { - unsafe { include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_uint64(self.0.as_raw(), to_u32(offset), val); + } } pub fn set_f64(&self, offset: usize, val: f64) { - unsafe { include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_float(self.0.as_raw(), to_u32(offset), val); + } } pub fn set_f32(&self, offset: usize, val: f32) { - unsafe { include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); } + unsafe { + include::lean_ctor_set_float32(self.0.as_raw(), to_u32(offset), val); + } } /// Set a `USize` field. `USize` fields occupy pointer-width entries in /// the data area right after object fields. `index` is relative to /// the first `USize` entry; the object field count is read from the header /// and added internally. pub fn set_usize(&self, index: usize, val: usize) { - unsafe { include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); } + unsafe { + include::lean_ctor_set_usize(self.0.as_raw(), to_u32(self.num_objs() + index), val); + } } /// Write a single `Bool` scalar field (`uint8_t`, 0 or 1). 
pub fn set_bool(&self, offset: usize, val: bool) { self.set_u8(offset, val as u8); } - } impl LeanCtor { @@ -1241,28 +1253,38 @@ pub trait LeanCtorScalar { // -- 4-byte tier (UInt32 / Float32) -- fn get_32(&self, i: usize) -> u32 { - self.as_ctor().get_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4) + self.as_ctor() + .get_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4) } fn set_32(&self, i: usize, val: u32) { - self.as_ctor().set_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4, val) + self.as_ctor() + .set_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4, val) } // -- 2-byte tier (UInt16) -- fn get_16(&self, i: usize) -> u16 { - self.as_ctor().get_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2) + self.as_ctor() + .get_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2) } fn set_16(&self, i: usize, val: u16) { - self.as_ctor().set_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2, val) + self.as_ctor().set_u16( + self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2, + val, + ) } // -- 1-byte tier (UInt8 / Bool) -- fn get_8(&self, i: usize) -> u8 { - self.as_ctor().get_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i) + self.as_ctor() + .get_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i) } fn set_8(&self, i: usize, val: u8) { - self.as_ctor().set_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i, val) + self.as_ctor().set_u8( + self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i, + val, + ) } } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 47fa223..81ba0fb 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -12,9 +12,9 @@ use std::sync::LazyLock; use crate::include; use crate::nat::Nat; use crate::object::{ - ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, - LeanCtorScalar, LeanExcept, LeanExternal, LeanIOResult, LeanList, 
LeanNat, LeanOption, - LeanOwned, LeanProd, LeanRef, LeanString, + ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, LeanCtorScalar, + LeanExcept, LeanExternal, LeanIOResult, LeanList, LeanNat, LeanOption, LeanOwned, LeanProd, + LeanRef, LeanString, }; // ============================================================================= @@ -50,7 +50,9 @@ crate::lean_domain_type! { impl LeanCtorScalar for LeanScalarStruct { const NUM_8B: usize = 1; const NUM_4B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } // ExtScalarStruct: 1 obj, scalars: u64, f64, u32, f32, u16, u8 @@ -58,13 +60,17 @@ impl LeanCtorScalar for LeanExtScalarStruct { const NUM_8B: usize = 2; // u64 + f64 const NUM_4B: usize = 2; // u32 + f32 const NUM_2B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } // USizeStruct: 1 obj, 1 usize, u8 impl LeanCtorScalar for LeanUSizeStruct { const NUM_USIZE: usize = 1; - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } // USizeMixedStruct: 1 obj, 1 usize, scalars: u64, u32, bool @@ -72,18 +78,24 @@ impl LeanCtorScalar for LeanUSizeMixedStruct { const NUM_USIZE: usize = 1; const NUM_8B: usize = 1; const NUM_4B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } // BoolStruct: 1 obj, 3 bools impl LeanCtorScalar for LeanBoolStruct { - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } // MultiU32Struct: 1 obj, 3 u32s impl LeanCtorScalar for LeanMultiU32Struct { const NUM_4B: usize = 3; - fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } + fn as_ctor(&self) -> LeanCtor> { + self.as_ctor() + } } /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). 
From 7d00b4529ce34ca7c9d6732c78b98460d2e34116 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 17:53:37 -0400 Subject: [PATCH 10/17] Add helper macro --- src/object.rs | 27 ++++++++++++++++++++++++++ src/test_ffi.rs | 51 ++++++------------------------------------------- 2 files changed, 33 insertions(+), 45 deletions(-) diff --git a/src/object.rs b/src/object.rs index ce4fc74..c134d1f 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1288,6 +1288,33 @@ pub trait LeanCtorScalar { } } +/// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin. +/// +/// Use this for multi-constructor inductives where [`LeanCtorScalar`] doesn't +/// apply because the scalar layout varies per constructor tag. `num_usize` is +/// the number of `USize` fields (0 for most types). +#[inline] +pub fn scalar_base(ctor: &LeanCtor, num_usize: usize) -> usize { + (ctor.num_objs() + num_usize) * size_of::() +} + +/// Implement [`LeanCtorScalar`] for a `lean_domain_type!` structure type. +/// +/// ```ignore +/// impl_ctor_scalar!(LeanFoo { NUM_8B = 1, NUM_4B = 2 }); +/// ``` +#[macro_export] +macro_rules! impl_ctor_scalar { + ($ty:ident { $($const:ident = $val:expr),* $(,)? }) => { + impl $crate::object::LeanCtorScalar for $ty { + $(const $const: usize = $val;)* + fn as_ctor(&self) -> $crate::object::LeanCtor<$crate::object::LeanBorrowed<'_>> { + self.as_ctor() + } + } + }; +} + // ============================================================================= // LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) // ============================================================================= diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 81ba0fb..49af4d6 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -47,56 +47,17 @@ crate::lean_domain_type! 
{ } // ScalarStruct: 1 obj, scalars: u64, u32, u8 -impl LeanCtorScalar for LeanScalarStruct { - const NUM_8B: usize = 1; - const NUM_4B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} - +crate::impl_ctor_scalar!(LeanScalarStruct { NUM_8B = 1, NUM_4B = 1 }); // ExtScalarStruct: 1 obj, scalars: u64, f64, u32, f32, u16, u8 -impl LeanCtorScalar for LeanExtScalarStruct { - const NUM_8B: usize = 2; // u64 + f64 - const NUM_4B: usize = 2; // u32 + f32 - const NUM_2B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} - +crate::impl_ctor_scalar!(LeanExtScalarStruct { NUM_8B = 2, NUM_4B = 2, NUM_2B = 1 }); // USizeStruct: 1 obj, 1 usize, u8 -impl LeanCtorScalar for LeanUSizeStruct { - const NUM_USIZE: usize = 1; - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} - +crate::impl_ctor_scalar!(LeanUSizeStruct { NUM_USIZE = 1 }); // USizeMixedStruct: 1 obj, 1 usize, scalars: u64, u32, bool -impl LeanCtorScalar for LeanUSizeMixedStruct { - const NUM_USIZE: usize = 1; - const NUM_8B: usize = 1; - const NUM_4B: usize = 1; - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} - +crate::impl_ctor_scalar!(LeanUSizeMixedStruct { NUM_USIZE = 1, NUM_8B = 1, NUM_4B = 1 }); // BoolStruct: 1 obj, 3 bools -impl LeanCtorScalar for LeanBoolStruct { - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} - +crate::impl_ctor_scalar!(LeanBoolStruct {}); // MultiU32Struct: 1 obj, 3 u32s -impl LeanCtorScalar for LeanMultiU32Struct { - const NUM_4B: usize = 3; - fn as_ctor(&self) -> LeanCtor> { - self.as_ctor() - } -} +crate::impl_ctor_scalar!(LeanMultiU32Struct { NUM_4B = 3 }); /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). 
fn build_nat(n: &Nat) -> LeanOwned { From 600320b1a9f058ebcd07d9e1a50c6d84bdf94e62 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 22:46:48 -0400 Subject: [PATCH 11/17] Add `alloc` method and support inductives --- Tests/FFI.lean | 22 +++++ Tests/Gen.lean | 24 +++++ src/lib.rs | 8 ++ src/object.rs | 155 ++++++++++++++++++++--------- src/test_ffi.rs | 257 +++++++++++++++++++++++++++++++++++------------- 5 files changed, 350 insertions(+), 116 deletions(-) diff --git a/Tests/FFI.lean b/Tests/FFI.lean index bb9835e..1462aef 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -74,6 +74,15 @@ opaque roundtripBoolStruct : @& BoolStruct → BoolStruct @[extern "rs_roundtrip_multi_u32_struct"] opaque roundtripMultiU32Struct : @& MultiU32Struct → MultiU32Struct +@[extern "rs_roundtrip_test_inductive"] +opaque roundtripTestInductive : @& TestInductive → TestInductive + +@[extern "rs_roundtrip_outer"] +opaque roundtripOuter : @& Outer → Outer + +@[extern "rs_roundtrip_inductive_holder"] +opaque roundtripInductiveHolder : @& InductiveHolder → InductiveHolder + @[extern "rs_roundtrip_float"] opaque roundtripFloat : Float → Float @@ -378,6 +387,19 @@ def borrowedRoundtripTests : TestSeq := test "MultiU32Struct zeros" (roundtripMultiU32Struct ⟨0, 0, 0, 0⟩ == ⟨0, 0, 0, 0⟩) ++ test "MultiU32Struct max" (roundtripMultiU32Struct ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩ == ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩) ++ test "MultiU32Struct mixed" (roundtripMultiU32Struct ⟨1, 42, 0, 99⟩ == ⟨1, 42, 0, 99⟩) ++ + -- TestInductive: multi-constructor inductive with non-zero tags + test "TestInductive empty" (roundtripTestInductive .empty == .empty) ++ + test "TestInductive withScalars" (roundtripTestInductive (.withScalars 42 99) == .withScalars 42 99) ++ + test "TestInductive withScalars max" (roundtripTestInductive (.withScalars 0xFFFFFFFFFFFFFFFF 0) == .withScalars 0xFFFFFFFFFFFFFFFF 0) ++ + test "TestInductive 
withMixed" (roundtripTestInductive (.withMixed 7 0xFFFFFFFF true) == .withMixed 7 0xFFFFFFFF true) ++ + test "TestInductive withMixed false" (roundtripTestInductive (.withMixed 0 0 false) == .withMixed 0 0 false) ++ + -- Outer: structure containing another structure + test "Outer zeros" (roundtripOuter ⟨⟨0, 0, 0, 0⟩, "", 0⟩ == ⟨⟨0, 0, 0, 0⟩, "", 0⟩) ++ + test "Outer mixed" (roundtripOuter ⟨⟨42, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩, "hello", 99⟩ == ⟨⟨42, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩, "hello", 99⟩) ++ + -- InductiveHolder: structure containing an inductive + test "InductiveHolder empty" (roundtripInductiveHolder ⟨.empty, 0⟩ == ⟨.empty, 0⟩) ++ + test "InductiveHolder withScalars" (roundtripInductiveHolder ⟨.withScalars 10 20, 42⟩ == ⟨.withScalars 10 20, 42⟩) ++ + test "InductiveHolder withMixed" (roundtripInductiveHolder ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩ == ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩) ++ test "External all fields" (externalAllFields (mkRustData 42 99 "hello") == "42:99:hello") ++ test "External all fields zeros" (externalAllFields (mkRustData 0 0 "") == "0:0:") ++ test "External large u64" (rustDataGetX (mkRustData 0xFFFFFFFFFFFFFFFF 0 "test") == 0xFFFFFFFFFFFFFFFF) ++ diff --git a/Tests/Gen.lean b/Tests/Gen.lean index 6f26372..06b8ed6 100644 --- a/Tests/Gen.lean +++ b/Tests/Gen.lean @@ -154,6 +154,30 @@ structure MultiU32Struct where c : UInt32 deriving Repr, BEq, DecidableEq, Inhabited +/-- Inductive with multiple constructors for testing non-zero TAG. + - empty: tag 0, no fields (scalar representation) + - withScalars: tag 1, 2 u64 fields + - withMixed: tag 2, 1 obj field + 1 u32 + 1 bool -/ +inductive TestInductive + | empty + | withScalars (a b : UInt64) + | withMixed (obj : Nat) (x : UInt32) (flag : Bool) +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure containing another structure as an object field. + Tests nested structure access via the trait API. 
-/ +structure Outer where + inner : ScalarStruct + label : String + count : UInt64 +deriving Repr, BEq, DecidableEq, Inhabited + +/-- Structure containing an inductive as an object field. -/ +structure InductiveHolder where + value : TestInductive + tag_copy : UInt32 +deriving Repr, BEq, DecidableEq, Inhabited + /-! ## Shrinkable instances -/ instance : Shrinkable Nat where diff --git a/src/lib.rs b/src/lib.rs index eca14e5..2a83e4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -105,6 +105,14 @@ macro_rules! lean_domain_type { } } + impl<'a> $name<$crate::object::LeanBorrowed<'a>> { + /// Wrap a borrowed `LeanCtor` as this domain type. + #[inline] + pub fn from_ctor(ctor: $crate::object::LeanCtor<$crate::object::LeanBorrowed<'a>>) -> Self { + Self(unsafe { $crate::object::LeanBorrowed::from_raw(ctor.as_raw()) }) + } + } + impl $name<$crate::object::LeanOwned> { /// Wrap an owned `LeanOwned` value. #[inline] diff --git a/src/object.rs b/src/object.rs index c134d1f..4d623e3 100644 --- a/src/object.rs +++ b/src/object.rs @@ -34,7 +34,7 @@ mod tags { use tags::*; /// Constructor tag for `IO.Error.userError`. -const IO_ERROR_USER_ERROR_TAG: u8 = 7; +const IO_ERROR_USER_ERROR_TAG: usize = 7; /// Convert a `usize` to `u32` for the Lean C API, panicking on overflow. #[inline] @@ -1001,8 +1001,8 @@ impl From> for LeanOwned { /// // Rust — using LeanCtorScalar trait (no manual offsets) /// let name = foo.as_ctor().get(0); // object field /// let size = foo.get_usize(0); // first USize field -/// let count = foo.get_u64(0); // first u64 scalar -/// let flag = foo.get_bool(0); // first bool scalar +/// let count = foo.get_num_64(0); // first 8-byte scalar +/// let flag = foo.get_num_8(0); // first 1-byte scalar /// ``` #[repr(transparent)] pub struct LeanCtor(R); @@ -1150,10 +1150,9 @@ impl LeanCtor { } /// Allocate a new constructor object. 
- pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { - let obj = unsafe { - include::lean_alloc_ctor(u32::from(tag), to_u32(num_objs), to_u32(scalar_size)) - }; + pub fn alloc(tag: usize, num_objs: usize, scalar_size: usize) -> Self { + let obj = + unsafe { include::lean_alloc_ctor(to_u32(tag), to_u32(num_objs), to_u32(scalar_size)) }; Self(LeanOwned(obj)) } @@ -1192,20 +1191,16 @@ impl From> for LeanOwned { /// Trait for type-indexed scalar field access on domain types. /// /// Implement this on a `lean_domain_type!` wrapper to get `get_*`/`set_*` -/// methods that index by type rather than byte offset. Set the associated -/// constants to match your Lean structure's field counts: +/// methods that index by size tier rather than byte offset. Works for +/// structures and for specific variants of multi-constructor inductives. +/// Use [`impl_ctor_scalar!`] to reduce boilerplate: /// /// ```ignore /// lean_domain_type! { LeanFoo; } +/// impl_ctor_scalar!(LeanFoo { NUM_OBJ = 1, NUM_USIZE = 1, NUM_64 = 1 }); /// -/// impl LeanCtorScalar for LeanFoo { -/// const NUM_USIZE: usize = 1; -/// const NUM_U64: usize = 1; -/// fn as_ctor(&self) -> LeanCtor> { self.as_ctor() } -/// } -/// -/// let count = foo.get_u64(0); -/// let flag = foo.get_bool(0); +/// let count = foo.get_num_64(0); +/// let flag = foo.get_num_8(0); /// ``` /// /// Within each size tier, Lean preserves **declaration order** — it does not @@ -1213,23 +1208,29 @@ impl From> for LeanOwned { /// natural integer type for that width. Use `f64::from_bits()` / /// `f64::to_bits()` for float fields, and `val != 0` for bool fields. pub trait LeanCtorScalar { + /// Constructor tag (0 for most structures). + const TAG: usize = 0; + /// Number of object fields (`lean_object*`). + const NUM_OBJ: usize = 0; /// Number of `USize` fields (pointer-width, after object fields). const NUM_USIZE: usize = 0; /// Number of 8-byte scalar fields (`UInt64` + `Float`). 
- const NUM_8B: usize = 0; + const NUM_64: usize = 0; /// Number of 4-byte scalar fields (`UInt32` + `Float32`). - const NUM_4B: usize = 0; + const NUM_32: usize = 0; /// Number of 2-byte scalar fields (`UInt16`). - const NUM_2B: usize = 0; - // 1-byte count not needed — it's the last tier. + const NUM_16: usize = 0; + /// Number of 1-byte scalar fields (`UInt8` + `Bool`). + const NUM_8: usize = 0; - /// Access the underlying constructor. Already generated by `lean_domain_type!`. + /// Access the underlying constructor. Provided automatically by + /// [`impl_ctor_scalar!`] — users should not need to implement this. fn as_ctor(&self) -> LeanCtor>; - /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin. - /// Reads the object field count from the header and adds `NUM_USIZE`. - fn scalar_base(&self) -> usize { - (self.as_ctor().num_objs() + Self::NUM_USIZE) * size_of::() + /// Set the `i`-th object field. Takes ownership of `val`. + fn set_obj(&self, i: usize, val: impl Into) { + let val: LeanOwned = val.into(); + unsafe { include::lean_ctor_set(self.as_ctor().as_raw(), to_u32(i), val.into_raw()) } } // -- USize fields -- @@ -1243,46 +1244,66 @@ pub trait LeanCtorScalar { // -- 8-byte tier (UInt64 / Float) -- - fn get_64(&self, i: usize) -> u64 { - self.as_ctor().get_u64(self.scalar_base() + i * 8) + fn get_num_64(&self, i: usize) -> u64 { + self.as_ctor() + .get_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8) } - fn set_64(&self, i: usize, val: u64) { - self.as_ctor().set_u64(self.scalar_base() + i * 8, val) + fn set_num_64(&self, i: usize, val: u64) { + self.as_ctor() + .set_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8, val) } // -- 4-byte tier (UInt32 / Float32) -- - fn get_32(&self, i: usize) -> u32 { + fn get_num_32(&self, i: usize) -> u32 { self.as_ctor() - .get_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4) + .get_u32(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4) } - fn 
set_32(&self, i: usize, val: u32) { - self.as_ctor() - .set_u32(self.scalar_base() + Self::NUM_8B * 8 + i * 4, val) + fn set_num_32(&self, i: usize, val: u32) { + self.as_ctor().set_u32( + scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4, + val, + ) } // -- 2-byte tier (UInt16) -- - fn get_16(&self, i: usize) -> u16 { - self.as_ctor() - .get_u16(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2) + fn get_num_16(&self, i: usize) -> u16 { + self.as_ctor().get_u16( + scalar_base(&self.as_ctor(), Self::NUM_USIZE) + + Self::NUM_64 * 8 + + Self::NUM_32 * 4 + + i * 2, + ) } - fn set_16(&self, i: usize, val: u16) { + fn set_num_16(&self, i: usize, val: u16) { self.as_ctor().set_u16( - self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + i * 2, + scalar_base(&self.as_ctor(), Self::NUM_USIZE) + + Self::NUM_64 * 8 + + Self::NUM_32 * 4 + + i * 2, val, ) } // -- 1-byte tier (UInt8 / Bool) -- - fn get_8(&self, i: usize) -> u8 { - self.as_ctor() - .get_u8(self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i) + fn get_num_8(&self, i: usize) -> u8 { + self.as_ctor().get_u8( + scalar_base(&self.as_ctor(), Self::NUM_USIZE) + + Self::NUM_64 * 8 + + Self::NUM_32 * 4 + + Self::NUM_16 * 2 + + i, + ) } - fn set_8(&self, i: usize, val: u8) { + fn set_num_8(&self, i: usize, val: u8) { self.as_ctor().set_u8( - self.scalar_base() + Self::NUM_8B * 8 + Self::NUM_4B * 4 + Self::NUM_2B * 2 + i, + scalar_base(&self.as_ctor(), Self::NUM_USIZE) + + Self::NUM_64 * 8 + + Self::NUM_32 * 4 + + Self::NUM_16 * 2 + + i, val, ) } @@ -1298,10 +1319,32 @@ pub fn scalar_base(ctor: &LeanCtor, num_usize: usize) -> usize { (ctor.num_objs() + num_usize) * size_of::() } -/// Implement [`LeanCtorScalar`] for a `lean_domain_type!` structure type. +/// Implement [`LeanCtorScalar`] for a `lean_domain_type!` type and generate +/// an `alloc()` constructor on the owned variant. 
+/// +/// Works for structures (tag 0) and specific variants of multi-constructor +/// inductives (set `TAG` to the variant's constructor tag). /// /// ```ignore -/// impl_ctor_scalar!(LeanFoo { NUM_8B = 1, NUM_4B = 2 }); +/// // Structure (tag defaults to 0): +/// impl_ctor_scalar!(LeanFoo { NUM_OBJ = 1, NUM_64 = 2 }); +/// +/// // For an inductive like: +/// // inductive CompareResult +/// // | match +/// // | mismatch (a b c : UInt64) +/// // | notFound +/// // where the `mismatch` variant has tag 1 and 3 u64 fields: +/// impl_ctor_scalar!(LeanMismatch { TAG = 1, NUM_64 = 3 }); +/// +/// let foo = LeanFoo::alloc(); +/// foo.set_obj(0, some_val); +/// foo.set_num_64(0, 42); +/// +/// let m = LeanMismatch::alloc(); +/// m.set_num_64(0, lean_size); +/// m.set_num_64(1, rust_size); +/// m.set_num_64(2, first_diff); /// ``` #[macro_export] macro_rules! impl_ctor_scalar { @@ -1312,6 +1355,24 @@ macro_rules! impl_ctor_scalar { self.as_ctor() } } + + impl $ty<$crate::object::LeanOwned> { + /// Allocate a new constructor with the layout defined by [`LeanCtorScalar`]. + pub fn alloc() -> Self { + use $crate::object::LeanCtorScalar; + let scalar_size = + ::NUM_USIZE * std::mem::size_of::() + + ::NUM_64 * 8 + + ::NUM_32 * 4 + + ::NUM_16 * 2 + + ::NUM_8; + Self::new($crate::object::LeanCtor::alloc( + ::TAG, + ::NUM_OBJ, + scalar_size, + ).into()) + } + } }; } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 49af4d6..ceee753 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -44,20 +44,38 @@ crate::lean_domain_type! 
{ LeanBoolStruct; /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 LeanMultiU32Struct; + /// Lean `TestInductive` — the full inductive (for tag dispatch) + LeanTestInductive; + /// Lean `TestInductive.withScalars` variant (tag 1, 2 u64 fields) + LeanTestInductiveWithScalars; + /// Lean `TestInductive.withMixed` variant (tag 2, 1 obj + 1 u32 + 1 bool) + LeanTestInductiveWithMixed; + /// Lean `Outer` — structure containing ScalarStruct + String + UInt64 + LeanOuter; + /// Lean `InductiveHolder` — structure containing TestInductive + UInt32 + LeanInductiveHolder; } // ScalarStruct: 1 obj, scalars: u64, u32, u8 -crate::impl_ctor_scalar!(LeanScalarStruct { NUM_8B = 1, NUM_4B = 1 }); +crate::impl_ctor_scalar!(LeanScalarStruct { NUM_OBJ = 1, NUM_64 = 1, NUM_32 = 1, NUM_8 = 1 }); // ExtScalarStruct: 1 obj, scalars: u64, f64, u32, f32, u16, u8 -crate::impl_ctor_scalar!(LeanExtScalarStruct { NUM_8B = 2, NUM_4B = 2, NUM_2B = 1 }); +crate::impl_ctor_scalar!(LeanExtScalarStruct { NUM_OBJ = 1, NUM_64 = 2, NUM_32 = 2, NUM_16 = 1, NUM_8 = 1 }); // USizeStruct: 1 obj, 1 usize, u8 -crate::impl_ctor_scalar!(LeanUSizeStruct { NUM_USIZE = 1 }); +crate::impl_ctor_scalar!(LeanUSizeStruct { NUM_OBJ = 1, NUM_USIZE = 1, NUM_8 = 1 }); // USizeMixedStruct: 1 obj, 1 usize, scalars: u64, u32, bool -crate::impl_ctor_scalar!(LeanUSizeMixedStruct { NUM_USIZE = 1, NUM_8B = 1, NUM_4B = 1 }); +crate::impl_ctor_scalar!(LeanUSizeMixedStruct { NUM_OBJ = 1, NUM_USIZE = 1, NUM_64 = 1, NUM_32 = 1, NUM_8 = 1 }); // BoolStruct: 1 obj, 3 bools -crate::impl_ctor_scalar!(LeanBoolStruct {}); +crate::impl_ctor_scalar!(LeanBoolStruct { NUM_OBJ = 1, NUM_8 = 3 }); // MultiU32Struct: 1 obj, 3 u32s -crate::impl_ctor_scalar!(LeanMultiU32Struct { NUM_4B = 3 }); +crate::impl_ctor_scalar!(LeanMultiU32Struct { NUM_OBJ = 1, NUM_32 = 3 }); +// Outer: 2 obj fields (inner, label), 1 u64 scalar +crate::impl_ctor_scalar!(LeanOuter { NUM_OBJ = 2, NUM_64 = 1 }); +// 
InductiveHolder: 1 obj field (value), 1 u32 scalar +crate::impl_ctor_scalar!(LeanInductiveHolder { NUM_OBJ = 1, NUM_32 = 1 }); +// TestInductive.withScalars: tag 1, 0 obj fields, 2 u64 scalars +crate::impl_ctor_scalar!(LeanTestInductiveWithScalars { TAG = 1, NUM_64 = 2 }); +// TestInductive.withMixed: tag 2, 1 obj field, 1 u32 + 1 bool +crate::impl_ctor_scalar!(LeanTestInductiveWithMixed { TAG = 2, NUM_OBJ = 1, NUM_32 = 1, NUM_8 = 1 }); /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). fn build_nat(n: &Nat) -> LeanOwned { @@ -264,16 +282,15 @@ pub(crate) extern "C" fn rs_roundtrip_scalar_struct( ptr: LeanScalarStruct>, ) -> LeanScalarStruct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); - let u64val = ptr.get_64(0); - let u32val = ptr.get_32(0); - let u8val = ptr.get_8(0); - - let ctor = LeanCtor::alloc(0, 1, 13); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanScalarStruct::new(ctor.into()); - out.set_64(0, u64val); - out.set_32(0, u32val); - out.set_8(0, u8val); + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0); + let u8val = ptr.get_num_8(0); + + let out = LeanScalarStruct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_64(0, u64val); + out.set_num_32(0, u32val); + out.set_num_8(0, u8val); out } @@ -392,23 +409,22 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ) -> LeanExtScalarStruct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); // 8-byte tier: u64 at index 0, f64 at index 1 - let u64val = ptr.get_64(0); - let fval = f64::from_bits(ptr.get_64(1)); + let u64val = ptr.get_num_64(0); + let fval = f64::from_bits(ptr.get_num_64(1)); // 4-byte tier: u32 at index 0, f32 at index 1 - let u32val = ptr.get_32(0); - let f32val = f32::from_bits(ptr.get_32(1)); - let u16val = ptr.get_16(0); - let u8val = ptr.get_8(0); - - let ctor = LeanCtor::alloc(0, 1, 27); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanExtScalarStruct::new(ctor.into()); - out.set_64(0, u64val); - out.set_64(1, 
fval.to_bits()); - out.set_32(0, u32val); - out.set_32(1, f32val.to_bits()); - out.set_16(0, u16val); - out.set_8(0, u8val); + let u32val = ptr.get_num_32(0); + let f32val = f32::from_bits(ptr.get_num_32(1)); + let u16val = ptr.get_num_16(0); + let u8val = ptr.get_num_8(0); + + let out = LeanExtScalarStruct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_64(0, u64val); + out.set_num_64(1, fval.to_bits()); + out.set_num_32(0, u32val); + out.set_num_32(1, f32val.to_bits()); + out.set_num_16(0, u16val); + out.set_num_8(0, u8val); out } @@ -425,13 +441,12 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( ) -> LeanUSizeStruct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); let uval = ptr.get_usize(0); - let u8val = ptr.get_8(0); + let u8val = ptr.get_num_8(0); - let ctor = LeanCtor::alloc(0, 1, 9); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanUSizeStruct::new(ctor.into()); + let out = LeanUSizeStruct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); out.set_usize(0, uval); - out.set_8(0, u8val); + out.set_num_8(0, u8val); out } @@ -444,17 +459,16 @@ pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( ) -> LeanUSizeMixedStruct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); let uval = ptr.get_usize(0); - let u64val = ptr.get_64(0); - let u32val = ptr.get_32(0); - let bval = ptr.get_8(0) != 0; + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0); + let bval = ptr.get_num_8(0) != 0; - let ctor = LeanCtor::alloc(0, 1, 21); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanUSizeMixedStruct::new(ctor.into()); + let out = LeanUSizeMixedStruct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); out.set_usize(0, uval); - out.set_64(0, u64val); - out.set_32(0, u32val); - out.set_8(0, bval as u8); + out.set_num_64(0, u64val); + out.set_num_32(0, u32val); + out.set_num_8(0, bval as u8); out } @@ -470,16 +484,15 @@ pub(crate) extern "C" fn rs_roundtrip_bool_struct( ptr: LeanBoolStruct>, ) -> LeanBoolStruct { let obj_nat = 
Nat::from_obj(&ptr.as_ctor().get(0)); - let b1 = ptr.get_8(0); - let b2 = ptr.get_8(1); - let b3 = ptr.get_8(2); - - let ctor = LeanCtor::alloc(0, 1, 3); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanBoolStruct::new(ctor.into()); - out.set_8(0, b1); - out.set_8(1, b2); - out.set_8(2, b3); + let b1 = ptr.get_num_8(0); + let b2 = ptr.get_num_8(1); + let b3 = ptr.get_num_8(2); + + let out = LeanBoolStruct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_8(0, b1); + out.set_num_8(1, b2); + out.set_num_8(2, b3); out } @@ -491,16 +504,122 @@ pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( ptr: LeanMultiU32Struct>, ) -> LeanMultiU32Struct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); - let a = ptr.get_32(0); - let b = ptr.get_32(1); - let c = ptr.get_32(2); - - let ctor = LeanCtor::alloc(0, 1, 12); - ctor.set(0, build_nat(&obj_nat)); - let out = LeanMultiU32Struct::new(ctor.into()); - out.set_32(0, a); - out.set_32(1, b); - out.set_32(2, c); + let a = ptr.get_num_32(0); + let b = ptr.get_num_32(1); + let c = ptr.get_num_32(2); + + let out = LeanMultiU32Struct::alloc(); + out.set_obj(0, build_nat(&obj_nat)); + out.set_num_32(0, a); + out.set_num_32(1, b); + out.set_num_32(2, c); + out +} + +// ============================================================================= +// TestInductive roundtrip (multi-constructor with non-zero tags) +// ============================================================================= + +/// Round-trip a TestInductive. 
+/// - tag 0 (empty): no fields, scalar representation +/// - tag 1 (withScalars): 2 u64 fields +/// - tag 2 (withMixed): 1 obj + 1 u32 + 1 bool +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_test_inductive( + ptr: LeanTestInductive>, +) -> LeanTestInductive { + // Tag 0 (empty) has no fields — Lean represents as a scalar + if ptr.inner().is_scalar() { + return LeanTestInductive::new(ptr.inner().to_owned_ref()); + } + + let ctor = ptr.as_ctor(); + match ctor.tag() { + 1 => { + // withScalars: use the variant domain type + let src = LeanTestInductiveWithScalars::from_ctor(ctor); + let a = src.get_num_64(0); + let b = src.get_num_64(1); + + let dst = LeanTestInductiveWithScalars::alloc(); + dst.set_num_64(0, a); + dst.set_num_64(1, b); + LeanTestInductive::new(dst.into()) + } + 2 => { + // withMixed: use the variant domain type + let src = LeanTestInductiveWithMixed::from_ctor(ctor); + let obj_nat = Nat::from_obj(&src.as_ctor().get(0)); + let x = src.get_num_32(0); + let flag = src.get_num_8(0); + + let dst = LeanTestInductiveWithMixed::alloc(); + dst.set_obj(0, build_nat(&obj_nat)); + dst.set_num_32(0, x); + dst.set_num_8(0, flag); + LeanTestInductive::new(dst.into()) + } + _ => unreachable!("Invalid TestInductive tag: {}", ctor.tag()), + } +} + +// ============================================================================= +// Nested structure roundtrip +// ============================================================================= + +/// Round-trip an Outer (structure containing ScalarStruct + String + UInt64). +/// Lean layout: 2 obj fields (inner : ScalarStruct, label : String), 1 u64 scalar. 
+#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_outer( + ptr: LeanOuter>, +) -> LeanOuter { + // Read: extract nested structure and scalar fields + let ctor = ptr.as_ctor(); + let inner_ref = ctor.get(0); // ScalarStruct (object field 0) + let label_ref = ctor.get(1); // String (object field 1) + let count = ptr.get_num_64(0); + + // Roundtrip the inner ScalarStruct through its own trait + let inner_wrapped = LeanScalarStruct(inner_ref); + let inner_u64 = inner_wrapped.get_num_64(0); + let inner_u32 = inner_wrapped.get_num_32(0); + let inner_u8 = inner_wrapped.get_num_8(0); + let inner_obj = Nat::from_obj(&inner_wrapped.as_ctor().get(0)); + + let new_inner = LeanScalarStruct::alloc(); + new_inner.set_obj(0, build_nat(&inner_obj)); + new_inner.set_num_64(0, inner_u64); + new_inner.set_num_32(0, inner_u32); + new_inner.set_num_8(0, inner_u8); + + // Roundtrip the label string + let new_label = LeanString::new(&label_ref.as_string().to_string()); + + // Write: construct new Outer + let out = LeanOuter::alloc(); + out.set_obj(0, new_inner); + out.set_obj(1, new_label); + out.set_num_64(0, count); + out +} + +/// Round-trip an InductiveHolder (structure containing TestInductive + UInt32). +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_inductive_holder( + ptr: LeanInductiveHolder>, +) -> LeanInductiveHolder { + // Read the inductive value (object field 0) and scalar + let ctor = ptr.as_ctor(); + let value_ref = ctor.get(0); + let tag_copy = ptr.get_num_32(0); + + // Roundtrip the inner TestInductive + let new_value = rs_roundtrip_test_inductive(LeanTestInductive(value_ref)); + + // Write + let out = LeanInductiveHolder::alloc(); + out.set_obj(0, new_value); + out.set_num_32(0, tag_copy); out } @@ -740,9 +859,9 @@ pub(crate) extern "C" fn rs_owned_prod_multiply(pair: LeanProd) -> Le /// Owned ScalarStruct: sum all scalar fields using LeanCtorScalar. 
#[unsafe(no_mangle)] pub(crate) extern "C" fn rs_owned_scalar_sum(ptr: LeanScalarStruct) -> u64 { - let u64val = ptr.get_64(0); - let u32val = ptr.get_32(0) as u64; - let u8val = ptr.get_8(0) as u64; + let u64val = ptr.get_num_64(0); + let u32val = ptr.get_num_32(0) as u64; + let u8val = ptr.get_num_8(0) as u64; u64val + u32val + u8val // ptr drops here → lean_dec } From fc7c979716f7177c1c43da68c3bdce4a698029b3 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 14 Apr 2026 23:32:10 -0400 Subject: [PATCH 12/17] Add out-of-bounds asserts --- src/object.rs | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/src/object.rs b/src/object.rs index 4d623e3..a1226cf 100644 --- a/src/object.rs +++ b/src/object.rs @@ -1229,6 +1229,11 @@ pub trait LeanCtorScalar { /// Set the `i`-th object field. Takes ownership of `val`. fn set_obj(&self, i: usize, val: impl Into) { + assert!( + i < Self::NUM_OBJ, + "object field index {i} out of bounds (NUM_OBJ = {})", + Self::NUM_OBJ + ); let val: LeanOwned = val.into(); unsafe { include::lean_ctor_set(self.as_ctor().as_raw(), to_u32(i), val.into_raw()) } } @@ -1236,19 +1241,39 @@ pub trait LeanCtorScalar { // -- USize fields -- fn get_usize(&self, i: usize) -> usize { + assert!( + i < Self::NUM_USIZE, + "USize field index {i} out of bounds (NUM_USIZE = {})", + Self::NUM_USIZE + ); self.as_ctor().get_usize(i) } fn set_usize(&self, i: usize, val: usize) { + assert!( + i < Self::NUM_USIZE, + "USize field index {i} out of bounds (NUM_USIZE = {})", + Self::NUM_USIZE + ); self.as_ctor().set_usize(i, val) } // -- 8-byte tier (UInt64 / Float) -- fn get_num_64(&self, i: usize) -> u64 { + assert!( + i < Self::NUM_64, + "64-bit field index {i} out of bounds (NUM_64 = {})", + Self::NUM_64 + ); self.as_ctor() .get_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8) } fn set_num_64(&self, i: usize, val: u64) { + assert!( + i < Self::NUM_64, + 
"64-bit field index {i} out of bounds (NUM_64 = {})", + Self::NUM_64 + ); self.as_ctor() .set_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8, val) } @@ -1256,10 +1281,20 @@ pub trait LeanCtorScalar { // -- 4-byte tier (UInt32 / Float32) -- fn get_num_32(&self, i: usize) -> u32 { + assert!( + i < Self::NUM_32, + "32-bit field index {i} out of bounds (NUM_32 = {})", + Self::NUM_32 + ); self.as_ctor() .get_u32(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4) } fn set_num_32(&self, i: usize, val: u32) { + assert!( + i < Self::NUM_32, + "32-bit field index {i} out of bounds (NUM_32 = {})", + Self::NUM_32 + ); self.as_ctor().set_u32( scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4, val, @@ -1269,6 +1304,11 @@ pub trait LeanCtorScalar { // -- 2-byte tier (UInt16) -- fn get_num_16(&self, i: usize) -> u16 { + assert!( + i < Self::NUM_16, + "16-bit field index {i} out of bounds (NUM_16 = {})", + Self::NUM_16 + ); self.as_ctor().get_u16( scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 @@ -1277,6 +1317,11 @@ pub trait LeanCtorScalar { ) } fn set_num_16(&self, i: usize, val: u16) { + assert!( + i < Self::NUM_16, + "16-bit field index {i} out of bounds (NUM_16 = {})", + Self::NUM_16 + ); self.as_ctor().set_u16( scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 @@ -1289,6 +1334,11 @@ pub trait LeanCtorScalar { // -- 1-byte tier (UInt8 / Bool) -- fn get_num_8(&self, i: usize) -> u8 { + assert!( + i < Self::NUM_8, + "8-bit field index {i} out of bounds (NUM_8 = {})", + Self::NUM_8 + ); self.as_ctor().get_u8( scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 @@ -1298,6 +1348,11 @@ pub trait LeanCtorScalar { ) } fn set_num_8(&self, i: usize, val: u8) { + assert!( + i < Self::NUM_8, + "8-bit field index {i} out of bounds (NUM_8 = {})", + Self::NUM_8 + ); self.as_ctor().set_u8( scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 From 
014d28a0aeb77b2ed9dd5782b10fbaaf5d773068 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 15 Apr 2026 19:04:54 -0400 Subject: [PATCH 13/17] Full inductives with `LeanCtorLayout` and `lean_inductive!` --- src/lib.rs | 281 +++++++++++++++++++++++++++++++- src/object.rs | 416 +++++++++++++++++++++--------------------------- src/test_ffi.rs | 216 +++++++++++++++++++------ 3 files changed, 627 insertions(+), 286 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2a83e4f..3d2046e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -71,10 +71,10 @@ pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_obje /// } /// ``` /// -/// For structures with scalar fields, implement [`LeanCtorScalar`] on -/// the generated type to get type-indexed `get_*`/`set_*` accessors. -/// -/// [`LeanCtorScalar`]: object::LeanCtorScalar +/// For structures with scalar fields, use [`lean_ctor!`] on the generated type +/// to get type-indexed `get_*`/`set_*` accessors. For multi-variant inductives, +/// declare one wrapper per variant (plus one for the inductive itself) and use +/// [`lean_inductive!`] to wire them together. #[macro_export] macro_rules! lean_domain_type { ($($(#[$meta:meta])* $name:ident;)*) => {$( @@ -139,3 +139,276 @@ macro_rules! lean_domain_type { } )*}; } + +/// Attach a single `lean_ctor_object` layout to a `lean_domain_type!` wrapper. +/// +/// Implements [`LeanCtorLayout<1>`](object::LeanCtorLayout) and generates +/// inherent `alloc()` / `set_obj` / `get_usize` / `get_num_*` / `set_num_*` +/// methods with bounds-checked, offset-baked access. +/// +/// Use for plain structures (tag defaults to 0) and for per-variant wrappers +/// of multi-variant inductives (pass `tag: N`). Within each scalar type size, +/// fields follow Lean's declaration order. +/// +/// ```ignore +/// lean_domain_type! 
{ LeanFoo; } +/// lean_ctor!(LeanFoo { num_obj: 1, num_64: 2 }); +/// +/// let foo = LeanFoo::alloc(); +/// foo.set_obj(0, some_val); +/// foo.set_num_64(0, 42); +/// ``` +#[macro_export] +macro_rules! lean_ctor { + ($ty:ident { $($key:ident : $val:expr),* $(,)? }) => { + impl $crate::object::LeanCtorLayout<1> for $ty { + const LAYOUTS: [$crate::object::SingleCtorLayout; 1] = [ + $crate::object::SingleCtorLayout { + $($key: $val,)* + ..$crate::object::SingleCtorLayout::ZERO + } + ]; + } + + impl $ty { + #[doc(hidden)] + const __LAYOUT: $crate::object::SingleCtorLayout = + >::LAYOUTS[0]; + + /// Constructor tag this wrapper represents. + #[inline] + pub const fn ctor_tag() -> u8 { + Self::__LAYOUT.tag + } + + /// Get a borrowed reference to the `i`-th object field. + pub fn get_obj(&self, i: usize) -> $crate::object::LeanBorrowed<'_> { + assert!( + i < Self::__LAYOUT.num_obj, + "object field index {i} out of bounds (num_obj = {})", + Self::__LAYOUT.num_obj, + ); + let raw = unsafe { + $crate::include::lean_ctor_get( + self.as_raw(), + $crate::object::to_u32(i), + ) + }; + unsafe { $crate::object::LeanBorrowed::from_raw(raw) } + } + + /// Set the `i`-th object field. Takes ownership of `val`. 
+ pub fn set_obj(&self, i: usize, val: impl Into<$crate::object::LeanOwned>) { + assert!( + i < Self::__LAYOUT.num_obj, + "object field index {i} out of bounds (num_obj = {})", + Self::__LAYOUT.num_obj, + ); + let val: $crate::object::LeanOwned = val.into(); + unsafe { + $crate::include::lean_ctor_set( + self.as_raw(), + $crate::object::to_u32(i), + val.into_raw(), + ); + } + } + + pub fn get_usize(&self, i: usize) -> usize { + assert!( + i < Self::__LAYOUT.num_usize, + "USize field index {i} out of bounds (num_usize = {})", + Self::__LAYOUT.num_usize, + ); + self.as_ctor().get_usize(i) + } + pub fn set_usize(&self, i: usize, val: usize) { + assert!( + i < Self::__LAYOUT.num_usize, + "USize field index {i} out of bounds (num_usize = {})", + Self::__LAYOUT.num_usize, + ); + self.as_ctor().set_usize(i, val) + } + + pub fn get_num_64(&self, i: usize) -> u64 { + assert!( + i < Self::__LAYOUT.num_64, + "64-bit field index {i} out of bounds (num_64 = {})", + Self::__LAYOUT.num_64, + ); + self.as_ctor().get_u64(Self::__LAYOUT.offset_64(i)) + } + pub fn set_num_64(&self, i: usize, val: u64) { + assert!( + i < Self::__LAYOUT.num_64, + "64-bit field index {i} out of bounds (num_64 = {})", + Self::__LAYOUT.num_64, + ); + self.as_ctor().set_u64(Self::__LAYOUT.offset_64(i), val) + } + + pub fn get_num_32(&self, i: usize) -> u32 { + assert!( + i < Self::__LAYOUT.num_32, + "32-bit field index {i} out of bounds (num_32 = {})", + Self::__LAYOUT.num_32, + ); + self.as_ctor().get_u32(Self::__LAYOUT.offset_32(i)) + } + pub fn set_num_32(&self, i: usize, val: u32) { + assert!( + i < Self::__LAYOUT.num_32, + "32-bit field index {i} out of bounds (num_32 = {})", + Self::__LAYOUT.num_32, + ); + self.as_ctor().set_u32(Self::__LAYOUT.offset_32(i), val) + } + + pub fn get_num_16(&self, i: usize) -> u16 { + assert!( + i < Self::__LAYOUT.num_16, + "16-bit field index {i} out of bounds (num_16 = {})", + Self::__LAYOUT.num_16, + ); + self.as_ctor().get_u16(Self::__LAYOUT.offset_16(i)) + } + pub 
fn set_num_16(&self, i: usize, val: u16) { + assert!( + i < Self::__LAYOUT.num_16, + "16-bit field index {i} out of bounds (num_16 = {})", + Self::__LAYOUT.num_16, + ); + self.as_ctor().set_u16(Self::__LAYOUT.offset_16(i), val) + } + + pub fn get_num_8(&self, i: usize) -> u8 { + assert!( + i < Self::__LAYOUT.num_8, + "8-bit field index {i} out of bounds (num_8 = {})", + Self::__LAYOUT.num_8, + ); + self.as_ctor().get_u8(Self::__LAYOUT.offset_8(i)) + } + pub fn set_num_8(&self, i: usize, val: u8) { + assert!( + i < Self::__LAYOUT.num_8, + "8-bit field index {i} out of bounds (num_8 = {})", + Self::__LAYOUT.num_8, + ); + self.as_ctor().set_u8(Self::__LAYOUT.offset_8(i), val) + } + } + + impl $ty<$crate::object::LeanOwned> { + /// Allocate a new constructor with this type's layout. + pub fn alloc() -> Self { + const L: $crate::object::SingleCtorLayout = + <$ty<$crate::object::LeanOwned> as $crate::object::LeanCtorLayout<1>>::LAYOUTS[0]; + Self::new($crate::object::LeanCtor::alloc(L.tag, L.num_obj, L.scalar_size()).into()) + } + } + }; +} + +/// Declare a Lean structure or multi-variant inductive and all its field +/// layouts in one shot. +/// +/// **Structure form** (one layout, tag 0): +/// ```ignore +/// lean_inductive! { +/// LeanPoint { num_obj: 2 } +/// } +/// ``` +/// +/// **Multi-variant form** (one wrapper per variant + top-level dispatch type): +/// ```ignore +/// lean_inductive! 
{ +/// LeanCompareResult { +/// LeanCompareMatched { tag: 0 }, +/// LeanCompareMismatch { tag: 1, num_64: 3 }, +/// LeanCompareNotFound { tag: 2 }, +/// } +/// } +/// +/// // Read side: +/// match result.as_ctor().tag() { +/// 1 => { +/// let m = LeanCompareMismatch::from_ctor(result.as_ctor()); +/// let first_diff = m.get_num_64(2); +/// } +/// _ => { /* matched or not found */ } +/// } +/// +/// // Write side: +/// let m = LeanCompareMismatch::alloc(); +/// m.set_num_64(0, lean_size); +/// let result: LeanCompareResult = m.into(); +/// ``` +/// +/// The form is disambiguated by the token after the first inner ident: `:` +/// (key-value pair) means structure, `{` (brace group) means variant. Variants +/// must be listed in tag order (tag 0 first, dense) and their names must differ +/// from the top-level name. +/// +/// This macro composes [`lean_domain_type!`] + [`lean_ctor!`]. Use those +/// directly if you need to split the domain-type declaration from the layout +/// (e.g. to declare wrappers that aren't ctors, or to attach the layout from a +/// different module). +#[macro_export] +macro_rules! lean_inductive { + // --- Structure form: LeanFoo { num_obj: 1, num_64: 2 } --- + // + // Distinguished from the multi-variant arm by the trailing `:` after the + // first inner ident (vs. `{` for the multi-variant case). + ( + $(#[$top_meta:meta])* + $top:ident { $($key:ident : $val:expr),* $(,)? } + ) => { + $crate::lean_domain_type! { $(#[$top_meta])* $top; } + $crate::lean_ctor!($top { $($key : $val),* }); + }; + + // --- Multi-variant form: LeanFoo { Variant1 { ... }, Variant2 { ... } } --- + ( + $(#[$top_meta:meta])* + $top:ident { + $( + $(#[$var_meta:meta])* + $variant:ident { $($key:ident : $val:expr),* $(,)? } + ),+ $(,)? + } + ) => { + $crate::lean_domain_type! 
{ + $(#[$top_meta])* $top; + $( $(#[$var_meta])* $variant; )+ + } + $( $crate::lean_ctor!($variant { $($key : $val),* }); )+ + + impl + $crate::object::LeanCtorLayout<{ [ $( stringify!($variant) ),+ ].len() }> + for $top + { + const LAYOUTS: [ + $crate::object::SingleCtorLayout; + [ $( stringify!($variant) ),+ ].len() + ] = [ + $( + <$variant<$crate::object::LeanOwned> + as $crate::object::LeanCtorLayout<1>>::LAYOUTS[0], + )+ + ]; + } + + $( + impl From<$variant<$crate::object::LeanOwned>> + for $top<$crate::object::LeanOwned> + { + #[inline] + fn from(v: $variant<$crate::object::LeanOwned>) -> Self { + Self::new(v.into()) + } + } + )+ + }; +} diff --git a/src/object.rs b/src/object.rs index a1226cf..b20e62f 100644 --- a/src/object.rs +++ b/src/object.rs @@ -34,11 +34,11 @@ mod tags { use tags::*; /// Constructor tag for `IO.Error.userError`. -const IO_ERROR_USER_ERROR_TAG: usize = 7; +const IO_ERROR_USER_ERROR_TAG: u8 = 7; /// Convert a `usize` to `u32` for the Lean C API, panicking on overflow. #[inline] -fn to_u32(val: usize) -> u32 { +pub fn to_u32(val: usize) -> u32 { u32::try_from(val).expect("value exceeds u32::MAX") } @@ -1150,9 +1150,10 @@ impl LeanCtor { } /// Allocate a new constructor object. 
- pub fn alloc(tag: usize, num_objs: usize, scalar_size: usize) -> Self { - let obj = - unsafe { include::lean_alloc_ctor(to_u32(tag), to_u32(num_objs), to_u32(scalar_size)) }; + pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { + let obj = unsafe { + include::lean_alloc_ctor(u32::from(tag), to_u32(num_objs), to_u32(scalar_size)) + }; Self(LeanOwned(obj)) } @@ -1185,250 +1186,86 @@ impl From> for LeanOwned { } // ============================================================================= -// LeanCtorScalar — trait for type-indexed scalar field access +// LeanCtorLayout — layout metadata for structures and inductive variants // ============================================================================= -/// Trait for type-indexed scalar field access on domain types. +/// Layout of a single `lean_ctor_object`: its tag and field counts per +/// scalar type size. Describes both structures (one layout, tag 0) and one +/// variant of a multi-variant inductive (tag N). /// -/// Implement this on a `lean_domain_type!` wrapper to get `get_*`/`set_*` -/// methods that index by size tier rather than byte offset. Works for -/// structures and for specific variants of multi-constructor inductives. -/// Use [`impl_ctor_scalar!`] to reduce boilerplate: +/// Scalar fields are ordered after object/USize fields by **descending size** +/// (8B, 4B, 2B, 1B). Within a given size, Lean preserves declaration order. /// -/// ```ignore -/// lean_domain_type! { LeanFoo; } -/// impl_ctor_scalar!(LeanFoo { NUM_OBJ = 1, NUM_USIZE = 1, NUM_64 = 1 }); -/// -/// let count = foo.get_num_64(0); -/// let flag = foo.get_num_8(0); -/// ``` -/// -/// Within each size tier, Lean preserves **declaration order** — it does not -/// sub-sort by type. Each tier has one getter/setter pair that returns the -/// natural integer type for that width. Use `f64::from_bits()` / -/// `f64::to_bits()` for float fields, and `val != 0` for bool fields. 
-pub trait LeanCtorScalar { - /// Constructor tag (0 for most structures). - const TAG: usize = 0; - /// Number of object fields (`lean_object*`). - const NUM_OBJ: usize = 0; - /// Number of `USize` fields (pointer-width, after object fields). - const NUM_USIZE: usize = 0; - /// Number of 8-byte scalar fields (`UInt64` + `Float`). - const NUM_64: usize = 0; - /// Number of 4-byte scalar fields (`UInt32` + `Float32`). - const NUM_32: usize = 0; - /// Number of 2-byte scalar fields (`UInt16`). - const NUM_16: usize = 0; - /// Number of 1-byte scalar fields (`UInt8` + `Bool`). - const NUM_8: usize = 0; - - /// Access the underlying constructor. Provided automatically by - /// [`impl_ctor_scalar!`] — users should not need to implement this. - fn as_ctor(&self) -> LeanCtor>; - - /// Set the `i`-th object field. Takes ownership of `val`. - fn set_obj(&self, i: usize, val: impl Into) { - assert!( - i < Self::NUM_OBJ, - "object field index {i} out of bounds (NUM_OBJ = {})", - Self::NUM_OBJ - ); - let val: LeanOwned = val.into(); - unsafe { include::lean_ctor_set(self.as_ctor().as_raw(), to_u32(i), val.into_raw()) } - } - - // -- USize fields -- - - fn get_usize(&self, i: usize) -> usize { - assert!( - i < Self::NUM_USIZE, - "USize field index {i} out of bounds (NUM_USIZE = {})", - Self::NUM_USIZE - ); - self.as_ctor().get_usize(i) - } - fn set_usize(&self, i: usize, val: usize) { - assert!( - i < Self::NUM_USIZE, - "USize field index {i} out of bounds (NUM_USIZE = {})", - Self::NUM_USIZE - ); - self.as_ctor().set_usize(i, val) - } - - // -- 8-byte tier (UInt64 / Float) -- +/// Use [`lean_ctor!`](crate::lean_ctor) to attach this layout to a structure +/// or to a per-variant wrapper type; use [`lean_inductive!`](crate::lean_inductive) +/// to wire variant wrappers to the top-level inductive type. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct SingleCtorLayout { + /// Constructor tag. Lean's `LEAN_MAX_CTOR_TAG` fits in `u8`. 
+ pub tag: u8, + pub num_obj: usize, + pub num_usize: usize, + pub num_64: usize, + pub num_32: usize, + pub num_16: usize, + pub num_8: usize, +} + +impl SingleCtorLayout { + pub const ZERO: Self = Self { + tag: 0, + num_obj: 0, + num_usize: 0, + num_64: 0, + num_32: 0, + num_16: 0, + num_8: 0, + }; - fn get_num_64(&self, i: usize) -> u64 { - assert!( - i < Self::NUM_64, - "64-bit field index {i} out of bounds (NUM_64 = {})", - Self::NUM_64 - ); - self.as_ctor() - .get_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8) - } - fn set_num_64(&self, i: usize, val: u64) { - assert!( - i < Self::NUM_64, - "64-bit field index {i} out of bounds (NUM_64 = {})", - Self::NUM_64 - ); - self.as_ctor() - .set_u64(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + i * 8, val) + /// Total byte size of the scalar region (USize + 64 + 32 + 16 + 8 groups). + #[inline] + pub const fn scalar_size(&self) -> usize { + self.num_usize * size_of::() + + self.num_64 * 8 + + self.num_32 * 4 + + self.num_16 * 2 + + self.num_8 } - // -- 4-byte tier (UInt32 / Float32) -- - - fn get_num_32(&self, i: usize) -> u32 { - assert!( - i < Self::NUM_32, - "32-bit field index {i} out of bounds (NUM_32 = {})", - Self::NUM_32 - ); - self.as_ctor() - .get_u32(scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4) - } - fn set_num_32(&self, i: usize, val: u32) { - assert!( - i < Self::NUM_32, - "32-bit field index {i} out of bounds (NUM_32 = {})", - Self::NUM_32 - ); - self.as_ctor().set_u32( - scalar_base(&self.as_ctor(), Self::NUM_USIZE) + Self::NUM_64 * 8 + i * 4, - val, - ) + /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin + /// (after object fields and USize fields). 
+ #[inline] + pub const fn scalar_base(&self) -> usize { + (self.num_obj + self.num_usize) * size_of::() } - // -- 2-byte tier (UInt16) -- - - fn get_num_16(&self, i: usize) -> u16 { - assert!( - i < Self::NUM_16, - "16-bit field index {i} out of bounds (NUM_16 = {})", - Self::NUM_16 - ); - self.as_ctor().get_u16( - scalar_base(&self.as_ctor(), Self::NUM_USIZE) - + Self::NUM_64 * 8 - + Self::NUM_32 * 4 - + i * 2, - ) + #[inline] + pub const fn offset_64(&self, i: usize) -> usize { + self.scalar_base() + i * 8 } - fn set_num_16(&self, i: usize, val: u16) { - assert!( - i < Self::NUM_16, - "16-bit field index {i} out of bounds (NUM_16 = {})", - Self::NUM_16 - ); - self.as_ctor().set_u16( - scalar_base(&self.as_ctor(), Self::NUM_USIZE) - + Self::NUM_64 * 8 - + Self::NUM_32 * 4 - + i * 2, - val, - ) + #[inline] + pub const fn offset_32(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + i * 4 } - - // -- 1-byte tier (UInt8 / Bool) -- - - fn get_num_8(&self, i: usize) -> u8 { - assert!( - i < Self::NUM_8, - "8-bit field index {i} out of bounds (NUM_8 = {})", - Self::NUM_8 - ); - self.as_ctor().get_u8( - scalar_base(&self.as_ctor(), Self::NUM_USIZE) - + Self::NUM_64 * 8 - + Self::NUM_32 * 4 - + Self::NUM_16 * 2 - + i, - ) + #[inline] + pub const fn offset_16(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + self.num_32 * 4 + i * 2 } - fn set_num_8(&self, i: usize, val: u8) { - assert!( - i < Self::NUM_8, - "8-bit field index {i} out of bounds (NUM_8 = {})", - Self::NUM_8 - ); - self.as_ctor().set_u8( - scalar_base(&self.as_ctor(), Self::NUM_USIZE) - + Self::NUM_64 * 8 - + Self::NUM_32 * 4 - + Self::NUM_16 * 2 - + i, - val, - ) + #[inline] + pub const fn offset_8(&self, i: usize) -> usize { + self.scalar_base() + self.num_64 * 8 + self.num_32 * 4 + self.num_16 * 2 + i } } -/// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin. +/// Declarative layout metadata for ctor-backed domain types. 
/// -/// Use this for multi-constructor inductives where [`LeanCtorScalar`] doesn't -/// apply because the scalar layout varies per constructor tag. `num_usize` is -/// the number of `USize` fields (0 for most types). -#[inline] -pub fn scalar_base(ctor: &LeanCtor, num_usize: usize) -> usize { - (ctor.num_objs() + num_usize) * size_of::() -} - -/// Implement [`LeanCtorScalar`] for a `lean_domain_type!` type and generate -/// an `alloc()` constructor on the owned variant. -/// -/// Works for structures (tag 0) and specific variants of multi-constructor -/// inductives (set `TAG` to the variant's constructor tag). -/// -/// ```ignore -/// // Structure (tag defaults to 0): -/// impl_ctor_scalar!(LeanFoo { NUM_OBJ = 1, NUM_64 = 2 }); -/// -/// // For an inductive like: -/// // inductive CompareResult -/// // | match -/// // | mismatch (a b c : UInt64) -/// // | notFound -/// // where the `mismatch` variant has tag 1 and 3 u64 fields: -/// impl_ctor_scalar!(LeanMismatch { TAG = 1, NUM_64 = 3 }); -/// -/// let foo = LeanFoo::alloc(); -/// foo.set_obj(0, some_val); -/// foo.set_num_64(0, 42); -/// -/// let m = LeanMismatch::alloc(); -/// m.set_num_64(0, lean_size); -/// m.set_num_64(1, rust_size); -/// m.set_num_64(2, first_diff); -/// ``` -#[macro_export] -macro_rules! impl_ctor_scalar { - ($ty:ident { $($const:ident = $val:expr),* $(,)? }) => { - impl $crate::object::LeanCtorScalar for $ty { - $(const $const: usize = $val;)* - fn as_ctor(&self) -> $crate::object::LeanCtor<$crate::object::LeanBorrowed<'_>> { - self.as_ctor() - } - } - - impl $ty<$crate::object::LeanOwned> { - /// Allocate a new constructor with the layout defined by [`LeanCtorScalar`]. 
- pub fn alloc() -> Self { - use $crate::object::LeanCtorScalar; - let scalar_size = - ::NUM_USIZE * std::mem::size_of::() - + ::NUM_64 * 8 - + ::NUM_32 * 4 - + ::NUM_16 * 2 - + ::NUM_8; - Self::new($crate::object::LeanCtor::alloc( - ::TAG, - ::NUM_OBJ, - scalar_size, - ).into()) - } - } - }; +/// - Structures and per-variant wrappers implement `LeanCtorLayout<1>` via +/// [`lean_ctor!`](crate::lean_ctor) (the wrapper represents a single +/// `lean_ctor_object` layout). +/// - Top-level multi-variant inductives implement `LeanCtorLayout` where +/// `N` is the variant count, via [`lean_inductive!`](crate::lean_inductive). +pub trait LeanCtorLayout { + const LAYOUTS: [SingleCtorLayout; N]; } // ============================================================================= @@ -2235,3 +2072,116 @@ impl LeanRef for LeanShared { self.0.as_raw() } } + +// ============================================================================= +// Unit tests for layout math (no Lean runtime needed) +// ============================================================================= + +#[cfg(test)] +mod layout_tests { + use super::*; + + #[test] + fn zero_layout_has_zero_size() { + let z = SingleCtorLayout::ZERO; + assert_eq!(z.tag, 0); + assert_eq!(z.num_obj, 0); + assert_eq!(z.scalar_size(), 0); + assert_eq!(z.scalar_base(), 0); + } + + #[test] + fn scalar_size_sums_all_groups() { + let l = SingleCtorLayout { + tag: 0, + num_obj: 0, + num_usize: 2, + num_64: 3, + num_32: 4, + num_16: 5, + num_8: 7, + }; + let expected = 2 * size_of::() + 3 * 8 + 4 * 4 + 5 * 2 + 7; + assert_eq!(l.scalar_size(), expected); + } + + #[test] + fn scalar_base_skips_obj_and_usize() { + let l = SingleCtorLayout { + num_obj: 3, + num_usize: 2, + ..SingleCtorLayout::ZERO + }; + // 3 obj slots + 2 usize slots, all pointer-width + assert_eq!(l.scalar_base(), 5 * size_of::()); + } + + #[test] + fn offsets_stack_by_descending_size() { + let l = SingleCtorLayout { + num_obj: 1, + num_usize: 1, + num_64: 2, + num_32: 
2, + num_16: 1, + num_8: 3, + tag: 0, + }; + let base = l.scalar_base(); + // 64-bit group starts at `base`. + assert_eq!(l.offset_64(0), base); + assert_eq!(l.offset_64(1), base + 8); + // 32-bit group starts after 2 u64s. + assert_eq!(l.offset_32(0), base + 16); + assert_eq!(l.offset_32(1), base + 16 + 4); + // 16-bit group starts after 2 u64s + 2 u32s. + assert_eq!(l.offset_16(0), base + 16 + 8); + // 8-bit group starts after 2 u64s + 2 u32s + 1 u16. + assert_eq!(l.offset_8(0), base + 16 + 8 + 2); + assert_eq!(l.offset_8(2), base + 16 + 8 + 2 + 2); + } + + #[test] + fn offsets_handle_empty_groups() { + // u64 + u8, skipping u32/u16 entirely. + let l = SingleCtorLayout { + num_64: 1, + num_8: 1, + ..SingleCtorLayout::ZERO + }; + assert_eq!(l.offset_64(0), 0); + // u8 sits immediately after the u64: 32/16 groups are zero-width. + assert_eq!(l.offset_8(0), 8); + assert_eq!(l.scalar_size(), 8 + 1); + } + + #[test] + fn layout_is_copy_in_const_context() { + // Confirms SingleCtorLayout can be read through LeanCtorLayout in const + // code — this is what the generated accessor methods rely on. 
+ struct Marker; + impl LeanCtorLayout<2> for Marker { + const LAYOUTS: [SingleCtorLayout; 2] = [ + SingleCtorLayout { + tag: 0, + num_obj: 1, + ..SingleCtorLayout::ZERO + }, + SingleCtorLayout { + tag: 1, + num_64: 2, + ..SingleCtorLayout::ZERO + }, + ]; + } + const L0: SingleCtorLayout = >::LAYOUTS[0]; + const L1: SingleCtorLayout = >::LAYOUTS[1]; + const OFF: usize = L1.offset_64(1); + + assert_eq!(L0.tag, 0); + assert_eq!(L0.num_obj, 1); + assert_eq!(L1.tag, 1); + assert_eq!(L1.num_64, 2); + assert_eq!(OFF, 8); + } +} diff --git a/src/test_ffi.rs b/src/test_ffi.rs index ceee753..14892f3 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -12,9 +12,9 @@ use std::sync::LazyLock; use crate::include; use crate::nat::Nat; use crate::object::{ - ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, LeanCtorScalar, - LeanExcept, LeanExternal, LeanIOResult, LeanList, LeanNat, LeanOption, LeanOwned, LeanProd, - LeanRef, LeanString, + ExternalClass, LeanArray, LeanBool, LeanBorrowed, LeanByteArray, LeanCtor, LeanExcept, + LeanExternal, LeanIOResult, LeanList, LeanNat, LeanOption, LeanOwned, LeanProd, LeanRef, + LeanString, }; // ============================================================================= @@ -26,56 +26,55 @@ crate::lean_domain_type! { LeanPoint; /// Lean `NatTree` — inductive NatTree | leaf : Nat → NatTree | node : NatTree → NatTree → NatTree LeanNatTree; + /// Lean `RustData` — opaque external object + LeanRustData; +} + +crate::lean_inductive! { /// Lean `ScalarStruct` — structure ScalarStruct where obj : Nat; u8val : UInt8; u32val : UInt32; u64val : UInt64 - LeanScalarStruct; + LeanScalarStruct { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } +} +crate::lean_inductive! { /// Lean `ExtScalarStruct` — all scalar types - LeanExtScalarStruct; + LeanExtScalarStruct { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } +} +crate::lean_inductive! 
{ /// Lean `USizeStruct` — structure USizeStruct where obj : Nat; uval : USize; u8val : UInt8 - LeanUSizeStruct; - /// Lean `RustData` — opaque external object - LeanRustData; + LeanUSizeStruct { num_obj: 1, num_usize: 1, num_8: 1 } +} +crate::lean_inductive! { /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool - LeanUSizeMixedStruct; + LeanUSizeMixedStruct { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } } - -// Test LeanCtorScalar trait -crate::lean_domain_type! { +crate::lean_inductive! { /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool - LeanBoolStruct; + LeanBoolStruct { num_obj: 1, num_8: 3 } +} +crate::lean_inductive! { /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 - LeanMultiU32Struct; - /// Lean `TestInductive` — the full inductive (for tag dispatch) - LeanTestInductive; - /// Lean `TestInductive.withScalars` variant (tag 1, 2 u64 fields) - LeanTestInductiveWithScalars; - /// Lean `TestInductive.withMixed` variant (tag 2, 1 obj + 1 u32 + 1 bool) - LeanTestInductiveWithMixed; + LeanMultiU32Struct { num_obj: 1, num_32: 3 } +} +crate::lean_inductive! { /// Lean `Outer` — structure containing ScalarStruct + String + UInt64 - LeanOuter; + LeanOuter { num_obj: 2, num_64: 1 } +} +crate::lean_inductive! 
{ /// Lean `InductiveHolder` — structure containing TestInductive + UInt32 - LeanInductiveHolder; -} - -// ScalarStruct: 1 obj, scalars: u64, u32, u8 -crate::impl_ctor_scalar!(LeanScalarStruct { NUM_OBJ = 1, NUM_64 = 1, NUM_32 = 1, NUM_8 = 1 }); -// ExtScalarStruct: 1 obj, scalars: u64, f64, u32, f32, u16, u8 -crate::impl_ctor_scalar!(LeanExtScalarStruct { NUM_OBJ = 1, NUM_64 = 2, NUM_32 = 2, NUM_16 = 1, NUM_8 = 1 }); -// USizeStruct: 1 obj, 1 usize, u8 -crate::impl_ctor_scalar!(LeanUSizeStruct { NUM_OBJ = 1, NUM_USIZE = 1, NUM_8 = 1 }); -// USizeMixedStruct: 1 obj, 1 usize, scalars: u64, u32, bool -crate::impl_ctor_scalar!(LeanUSizeMixedStruct { NUM_OBJ = 1, NUM_USIZE = 1, NUM_64 = 1, NUM_32 = 1, NUM_8 = 1 }); -// BoolStruct: 1 obj, 3 bools -crate::impl_ctor_scalar!(LeanBoolStruct { NUM_OBJ = 1, NUM_8 = 3 }); -// MultiU32Struct: 1 obj, 3 u32s -crate::impl_ctor_scalar!(LeanMultiU32Struct { NUM_OBJ = 1, NUM_32 = 3 }); -// Outer: 2 obj fields (inner, label), 1 u64 scalar -crate::impl_ctor_scalar!(LeanOuter { NUM_OBJ = 2, NUM_64 = 1 }); -// InductiveHolder: 1 obj field (value), 1 u32 scalar -crate::impl_ctor_scalar!(LeanInductiveHolder { NUM_OBJ = 1, NUM_32 = 1 }); -// TestInductive.withScalars: tag 1, 0 obj fields, 2 u64 scalars -crate::impl_ctor_scalar!(LeanTestInductiveWithScalars { TAG = 1, NUM_64 = 2 }); -// TestInductive.withMixed: tag 2, 1 obj field, 1 u32 + 1 bool -crate::impl_ctor_scalar!(LeanTestInductiveWithMixed { TAG = 2, NUM_OBJ = 1, NUM_32 = 1, NUM_8 = 1 }); + LeanInductiveHolder { num_obj: 1, num_32: 1 } +} + +crate::lean_inductive! { + /// Lean `TestInductive` — multi-variant inductive for tag dispatch testing. + /// tag 0 (empty, scalar-unboxed), tag 1 (2 u64), tag 2 (1 obj + 1 u32 + 1 bool). 
+ LeanTestInductive { + /// tag 0 — no fields (scalar-unboxed by Lean) + LeanTestInductiveEmpty { tag: 0 }, + /// tag 1 — 2 u64 fields + LeanTestInductiveWithScalars { tag: 1, num_64: 2 }, + /// tag 2 — 1 obj + 1 u32 + 1 bool + LeanTestInductiveWithMixed { tag: 2, num_obj: 1, num_32: 1, num_8: 1 }, + } +} /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). fn build_nat(n: &Nat) -> LeanOwned { @@ -408,10 +407,10 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( ptr: LeanExtScalarStruct>, ) -> LeanExtScalarStruct { let obj_nat = Nat::from_obj(&ptr.as_ctor().get(0)); - // 8-byte tier: u64 at index 0, f64 at index 1 + // 8-byte scalars: u64 at index 0, f64 at index 1 let u64val = ptr.get_num_64(0); let fval = f64::from_bits(ptr.get_num_64(1)); - // 4-byte tier: u32 at index 0, f32 at index 1 + // 4-byte scalars: u32 at index 0, f32 at index 1 let u32val = ptr.get_num_32(0); let f32val = f32::from_bits(ptr.get_num_32(1)); let u16val = ptr.get_num_16(0); @@ -517,7 +516,7 @@ pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( } // ============================================================================= -// TestInductive roundtrip (multi-constructor with non-zero tags) +// TestInductive roundtrip (multi-variant with non-zero tags) // ============================================================================= /// Round-trip a TestInductive. 
@@ -544,7 +543,7 @@ pub(crate) extern "C" fn rs_roundtrip_test_inductive( let dst = LeanTestInductiveWithScalars::alloc(); dst.set_num_64(0, a); dst.set_num_64(1, b); - LeanTestInductive::new(dst.into()) + dst.into() } 2 => { // withMixed: use the variant domain type @@ -557,7 +556,7 @@ pub(crate) extern "C" fn rs_roundtrip_test_inductive( dst.set_obj(0, build_nat(&obj_nat)); dst.set_num_32(0, x); dst.set_num_8(0, flag); - LeanTestInductive::new(dst.into()) + dst.into() } _ => unreachable!("Invalid TestInductive tag: {}", ctor.tag()), } @@ -1774,3 +1773,122 @@ pub(crate) extern "C" fn rs_shared_persistent_nat( ); first.to_lean() } + +// ============================================================================= +// Unit tests for generated LAYOUTS (no Lean runtime needed) +// ============================================================================= + +#[cfg(test)] +mod layout_sanity { + use crate::object::{LeanCtorLayout, LeanOwned, SingleCtorLayout}; + + type Scalar = super::LeanScalarStruct; + type Ext = super::LeanExtScalarStruct; + type USize_ = super::LeanUSizeStruct; + type UMixed = super::LeanUSizeMixedStruct; + type Bools = super::LeanBoolStruct; + type MultiU32 = super::LeanMultiU32Struct; + type Outer = super::LeanOuter; + type Holder = super::LeanInductiveHolder; + type Ind = super::LeanTestInductive; + type Empty = super::LeanTestInductiveEmpty; + type WScalars = super::LeanTestInductiveWithScalars; + type WMixed = super::LeanTestInductiveWithMixed; + + fn layout>() -> SingleCtorLayout { + >::LAYOUTS[0] + } + + #[test] + fn scalar_struct_layout() { + let l = layout::(); + assert_eq!((l.tag, l.num_obj), (0, 1)); + assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (1, 1, 0, 1)); + assert_eq!(l.scalar_size(), 8 + 4 + 1); + } + + #[test] + fn ext_scalar_struct_layout() { + let l = layout::(); + assert_eq!((l.tag, l.num_obj), (0, 1)); + assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (2, 2, 1, 1)); + assert_eq!(l.offset_32(0), l.scalar_base() + 
16); + assert_eq!(l.offset_8(0), l.scalar_base() + 16 + 8 + 2); + } + + #[test] + fn usize_struct_layout() { + let l = layout::(); + assert_eq!(l.num_usize, 1); + assert_eq!(l.num_8, 1); + // USize sits between obj fields and fixed-width scalars. + assert_eq!(l.scalar_base(), (1 + 1) * size_of::()); + } + + #[test] + fn usize_mixed_struct_layout() { + let l = layout::(); + assert_eq!( + (l.num_obj, l.num_usize, l.num_64, l.num_32, l.num_8), + (1, 1, 1, 1, 1) + ); + } + + #[test] + fn bool_struct_layout() { + let l = layout::(); + assert_eq!(l.num_8, 3); + assert_eq!(l.offset_8(2), l.scalar_base() + 2); + } + + #[test] + fn multi_u32_struct_layout() { + let l = layout::(); + assert_eq!(l.num_32, 3); + assert_eq!(l.offset_32(2), l.scalar_base() + 8); + } + + #[test] + fn outer_layout() { + let l = layout::(); + assert_eq!((l.num_obj, l.num_64), (2, 1)); + } + + #[test] + fn inductive_holder_layout() { + let l = layout::(); + assert_eq!((l.num_obj, l.num_32), (1, 1)); + } + + #[test] + fn inductive_variant_tags_match_positions() { + let layouts = >::LAYOUTS; + assert_eq!(layouts[0].tag, 0); + assert_eq!(layouts[1].tag, 1); + assert_eq!(layouts[2].tag, 2); + } + + #[test] + fn inductive_variant_layouts_match_variants() { + let layouts = >::LAYOUTS; + // empty: tag 0, no fields + assert_eq!(layouts[0], layout::()); + // withScalars: tag 1, 2 u64 + assert_eq!(layouts[1], layout::()); + assert_eq!(layouts[1].num_64, 2); + // withMixed: tag 2, 1 obj + 1 u32 + 1 bool + assert_eq!(layouts[2], layout::()); + assert_eq!( + (layouts[2].num_obj, layouts[2].num_32, layouts[2].num_8), + (1, 1, 1) + ); + } + + #[test] + fn ctor_tag_matches_layout_tag() { + assert_eq!(Empty::ctor_tag(), 0); + assert_eq!(WScalars::ctor_tag(), 1); + assert_eq!(WMixed::ctor_tag(), 2); + assert_eq!(Scalar::ctor_tag(), 0); + } +} From ad4629e1c3e42e0b519dbfda70f331b597a3d580 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 15 Apr 2026 19:20:29 
-0400 Subject: [PATCH 14/17] Docs --- README.md | 107 +++++++++++++++++++++++++++++++----------------- Tests/FFI.lean | 2 +- src/lib.rs | 42 +++++++++++-------- src/object.rs | 17 ++++---- src/test_ffi.rs | 14 +++---- 5 files changed, 111 insertions(+), 71 deletions(-) diff --git a/README.md b/README.md index 2482799..0e1ee7a 100644 --- a/README.md +++ b/README.md @@ -102,42 +102,64 @@ becomes `LeanPoint` in Rust. #### Defining custom domain types -Use the `lean_domain_type!` macro to define newtypes for your Lean types: +Use the `lean_inductive!` macro to declare a ctor-backed domain type. It +generates a `#[repr(transparent)]` wrapper and attaches typed accessors +(`get_obj`/`set_obj`, `get_num_{64,32,16,8}`/`set_num_*`, `get_usize`/`set_usize`) +plus an `alloc()` constructor. Byte offsets are computed at compile time from +the declared layout; indices are bounds-checked. + +**Structure** (one constructor, tag 0): ```rust -lean_ffi::lean_domain_type! { - /// Lean `Point` — structure Point where x : Nat; y : Nat - LeanPoint; +lean_ffi::lean_inductive! { /// Lean `PutResponse` — structure PutResponse where message : String; hash : String - LeanPutResponse; + LeanPutResponse { num_obj: 2 } +} + +impl LeanPutResponse { + pub fn mk(message: &str, hash: &str) -> Self { + let obj = Self::alloc(); + obj.set_obj(0, LeanString::new(message)); + obj.set_obj(1, LeanString::new(hash)); + obj + } } ``` -This generates a `#[repr(transparent)]` wrapper with `Clone`, `Copy` for -`LeanBorrowed`, `self.inner()`, `self.as_raw()`, `self.into_raw()`, and `From` -impls. You can then add accessor methods — readers are generic over `R: LeanRef` -(work on both owned and borrowed), constructors return `LeanOwned`: +**Multi-variant inductive** — declare the top-level wrapper and each variant +in one call. 
`From> for Top` is generated for +ergonomic construction: ```rust -impl LeanPutResponse { - pub fn message(&self) -> LeanBorrowed<'_> { - self.as_ctor().get(0) // borrowed ref into the object, no lean_inc - } - pub fn hash(&self) -> LeanBorrowed<'_> { - self.as_ctor().get(1) +lean_ffi::lean_inductive! { + /// Lean `CompareResult` — | matched | mismatch (l r : UInt64) | notFound + LeanCompareResult { + LeanCompareMatched { tag: 0 }, + LeanCompareMismatch { tag: 1, num_64: 2 }, + LeanCompareNotFound { tag: 2 }, } } -impl LeanPutResponse { - pub fn mk(message: &str, hash: &str) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, LeanString::new(message)); - ctor.set(1, LeanString::new(hash)); - Self::new(ctor.into()) +// Build side: +let m = LeanCompareMismatch::alloc(); +m.set_num_64(0, lean_size); +m.set_num_64(1, rust_size); +let result: LeanCompareResult = m.into(); + +// Read side: dispatch on ctor.tag(), then construct the variant wrapper: +match result.as_ctor().tag() { + 1 => { + let m = LeanCompareMismatch::from_ctor(result.as_ctor()); + let (l, r) = (m.get_num_64(0), m.get_num_64(1)); } + _ => { /* matched or not found */ } } ``` +For bare domain types without a ctor layout (opaque external objects, etc.) +use the lower-level `lean_domain_type!` macro, which generates just the wrapper +and does not attach a layout. + ### Inductive types and field layout Extra care must be taken when dealing with @@ -178,31 +200,42 @@ bytes): - `u32val` occupies bytes 8–11 - `u8val` occupies byte 12 -Use `LeanCtor` to access fields at the correct positions. Scalar getters and -setters take `(num_slots, byte_offset)` — `num_slots` is the total number of -8-byte slots (object fields + USize fields) preceding the scalar data area, and -`byte_offset` is the position of the field within that area. 
+Declare the field counts per size (object / USize / 64 / 32 / 16 / 8) and the +`lean_inductive!` macro computes offsets and generates size-indexed accessors: ```rust -impl LeanScalarStruct { - pub fn obj(&self) -> LeanBorrowed<'_> { self.as_ctor().get(0) } - pub fn u64val(&self) -> u64 { self.as_ctor().get_u64(1, 0) } - pub fn u32val(&self) -> u32 { self.as_ctor().get_u32(1, 8) } - pub fn u8val(&self) -> u8 { self.as_ctor().get_u8(1, 12) } +lean_ffi::lean_inductive! { + /// Lean `MyStruct` — u8val, obj, u32val, u64val (declaration order) + LeanMyStruct { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } } -impl LeanScalarStruct { +impl LeanMyStruct { + pub fn obj(&self) -> LeanBorrowed<'_> { self.get_obj(0) } + pub fn u64val(&self) -> u64 { self.get_num_64(0) } + pub fn u32val(&self) -> u32 { self.get_num_32(0) } + pub fn u8val(&self) -> u8 { self.get_num_8(0) } +} + +impl LeanMyStruct { pub fn mk(obj: LeanNat, u64val: u64, u32val: u32, u8val: u8) -> Self { - let ctor = LeanCtor::alloc(0, 1, 13); // tag 0, 1 obj field, 13 scalar bytes - ctor.set(0, obj); // object field 0 - ctor.set_u64(1, 0, u64val); // 1 slot before scalars, byte 0 - ctor.set_u32(1, 8, u32val); // 1 slot before scalars, byte 8 - ctor.set_u8(1, 12, u8val); // 1 slot before scalars, byte 12 - Self::new(ctor.into()) + let out = Self::alloc(); + out.set_obj(0, obj); + out.set_num_64(0, u64val); + out.set_num_32(0, u32val); + out.set_num_8(0, u8val); + out } } ``` +Indices are per size (first `num_64`, first `num_32`, …) and in Lean's +declaration order within each size. No hand-rolled byte offsets. + +If you need raw, unchecked access (e.g. for non-standard layouts) the +`LeanCtor` type exposes `get_u{8,16,32,64}(offset)` / `set_u{8,16,32,64}(offset, val)` +with absolute byte offsets matching Lean's `lean_ctor_get_uint*` / +`lean_ctor_set_uint*` C API. + ### External objects (`LeanExternal`) External objects let you store arbitrary Rust data inside a Lean object. 
Lean diff --git a/Tests/FFI.lean b/Tests/FFI.lean index 1462aef..62ee27e 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -387,7 +387,7 @@ def borrowedRoundtripTests : TestSeq := test "MultiU32Struct zeros" (roundtripMultiU32Struct ⟨0, 0, 0, 0⟩ == ⟨0, 0, 0, 0⟩) ++ test "MultiU32Struct max" (roundtripMultiU32Struct ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩ == ⟨100, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF⟩) ++ test "MultiU32Struct mixed" (roundtripMultiU32Struct ⟨1, 42, 0, 99⟩ == ⟨1, 42, 0, 99⟩) ++ - -- TestInductive: multi-constructor inductive with non-zero tags + -- TestInductive: multi-variant inductive with non-zero tags test "TestInductive empty" (roundtripTestInductive .empty == .empty) ++ test "TestInductive withScalars" (roundtripTestInductive (.withScalars 42 99) == .withScalars 42 99) ++ test "TestInductive withScalars max" (roundtripTestInductive (.withScalars 0xFFFFFFFFFFFFFFFF 0) == .withScalars 0xFFFFFFFFFFFFFFFF 0) ++ diff --git a/src/lib.rs b/src/lib.rs index 3d2046e..b549154 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -55,26 +55,26 @@ pub fn inc_heartbeat() { pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_object) {} /// Generate a `#[repr(transparent)]` newtype over a `LeanRef` type parameter -/// for a specific Lean type, with Clone, conditional Copy, from_raw, into_raw, and From impls. +/// for a specific Lean type, with Clone, conditional Copy, `as_ctor`, `from_ctor`, +/// `new`, `into_raw`, and `From> for LeanOwned` impls. /// -/// # Naming convention +/// This is the low-level building block for bare domain types (external +/// objects, types without a ctor layout, or types whose layout is attached +/// separately). For ctor-backed structures and inductives, prefer +/// [`lean_inductive!`] — it calls this macro internally and also attaches +/// the layout + accessor methods in one declaration. 
/// -/// Domain types should be prefixed with `Lean` to distinguish them from Lean-side types -/// and to match the built-in types (`LeanArray`, `LeanString`, `LeanNat`, etc.). +/// # Naming convention /// -/// For example, a Lean `Point` structure becomes `LeanPoint` in Rust: +/// Domain types should be prefixed with `Lean` to distinguish them from Lean-side +/// types and to match the built-in types (`LeanArray`, `LeanString`, `LeanNat`, etc.). /// /// ```ignore /// lean_domain_type! { -/// /// Lean `Point` — structure Point where x : Nat; y : Nat -/// LeanPoint; +/// /// Lean `RustData` — opaque external object +/// LeanRustData; /// } /// ``` -/// -/// For structures with scalar fields, use [`lean_ctor!`] on the generated type -/// to get type-indexed `get_*`/`set_*` accessors. For multi-variant inductives, -/// declare one wrapper per variant (plus one for the inductive itself) and use -/// [`lean_inductive!`] to wire them together. #[macro_export] macro_rules! lean_domain_type { ($($(#[$meta:meta])* $name:ident;)*) => {$( @@ -140,15 +140,21 @@ macro_rules! lean_domain_type { )*}; } -/// Attach a single `lean_ctor_object` layout to a `lean_domain_type!` wrapper. +/// Attach a single `lean_ctor_object` layout to a [`lean_domain_type!`] wrapper. /// /// Implements [`LeanCtorLayout<1>`](object::LeanCtorLayout) and generates -/// inherent `alloc()` / `set_obj` / `get_usize` / `get_num_*` / `set_num_*` -/// methods with bounds-checked, offset-baked access. +/// inherent `alloc()`, `ctor_tag()`, `get_obj` / `set_obj`, `get_usize` / +/// `set_usize`, and `get_num_{64,32,16,8}` / `set_num_{64,32,16,8}` methods. +/// Indices are bounds-checked against the declared counts and all byte offsets +/// are const-computed from the layout. +/// +/// Within each scalar size (8B / 4B / 2B / 1B), fields follow Lean's +/// declaration order. Tag defaults to 0; pass `tag: N` for non-zero variants. 
/// -/// Use for plain structures (tag defaults to 0) and for per-variant wrappers -/// of multi-variant inductives (pass `tag: N`). Within each scalar type size, -/// fields follow Lean's declaration order. +/// Most callers should use [`lean_inductive!`] instead — it composes +/// [`lean_domain_type!`] + `lean_ctor!` in one declaration. Reach for +/// `lean_ctor!` directly only if the domain type is declared separately (e.g. +/// in another module) or if you want to attach the layout after the fact. /// /// ```ignore /// lean_domain_type! { LeanFoo; } diff --git a/src/object.rs b/src/object.rs index b20e62f..f0d98ef 100644 --- a/src/object.rs +++ b/src/object.rs @@ -985,9 +985,10 @@ impl From> for LeanOwned { /// the object field count is read from the header and added internally, so /// `get_usize(0)` reads the first `USize` field. /// -/// The [`LeanCtorScalar`] trait computes these byte offsets automatically -/// from declared field counts, so users don't need to calculate them -/// manually. Reading all fields of `Foo` in C and with the trait: +/// The [`lean_ctor!`](crate::lean_ctor) and [`lean_inductive!`](crate::lean_inductive) +/// macros compute these byte offsets automatically from declared field counts +/// and attach typed accessor methods, so users don't need to calculate offsets +/// manually. Reading all fields of `Foo` in C and via the generated accessors: /// /// ```c /// // C — all offsets are from lean_ctor_obj_cptr @@ -998,11 +999,11 @@ impl From> for LeanOwned { /// ``` /// /// ```ignore -/// // Rust — using LeanCtorScalar trait (no manual offsets) -/// let name = foo.as_ctor().get(0); // object field -/// let size = foo.get_usize(0); // first USize field -/// let count = foo.get_num_64(0); // first 8-byte scalar -/// let flag = foo.get_num_8(0); // first 1-byte scalar +/// // Rust — generated by lean_ctor! / lean_inductive! 
(no manual offsets) +/// let name = foo.get_obj(0); // object field +/// let size = foo.get_usize(0); // first USize field +/// let count = foo.get_num_64(0); // first 8-byte scalar +/// let flag = foo.get_num_8(0); // first 1-byte scalar /// ``` #[repr(transparent)] pub struct LeanCtor(R); diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 14892f3..ecb85a8 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -273,7 +273,7 @@ pub(crate) extern "C" fn rs_io_result_error_string( // LeanCtor scalar fields // ============================================================================= -/// Round-trip a ScalarStruct using LeanCtorScalar trait. +/// Round-trip a ScalarStruct via generated accessors. /// Lean layout: 1 obj field, then scalars by descending size: u64, u32, u8. /// Total scalar size: 8 + 4 + 1 = 13 bytes. #[unsafe(no_mangle)] @@ -399,7 +399,7 @@ pub(crate) extern "C" fn rs_external_get_label( // Extended scalar struct roundtrip (u8, u16, u32, u64, f64, f32) // ============================================================================= -/// Round-trip an ExtScalarStruct using LeanCtorScalar trait. +/// Round-trip an ExtScalarStruct via generated accessors. /// Lean layout: 1 obj, then descending size: u64, f64, u32, f32, u16, u8. /// Total scalar: 27 bytes. #[unsafe(no_mangle)] @@ -431,7 +431,7 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( // USize struct roundtrip // ============================================================================= -/// Round-trip a USizeStruct using LeanCtorScalar trait. +/// Round-trip a USizeStruct via generated accessors. /// Lean layout: 1 obj field, 1 usize field, then u8. /// Alloc: num_objs=1, scalar_sz=9 (8 for usize + 1 for u8). #[unsafe(no_mangle)] @@ -449,7 +449,7 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( out } -/// Round-trip a USizeMixedStruct using LeanCtorScalar trait. +/// Round-trip a USizeMixedStruct via generated accessors. 
/// Lean layout: 1 obj field, 1 usize field, then scalars: u64, u32, bool. /// Total scalar size: 8 (usize) + 8 (u64) + 4 (u32) + 1 (bool) = 21 bytes. #[unsafe(no_mangle)] @@ -475,7 +475,7 @@ pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( // BoolStruct / MultiU32Struct roundtrips (batch scalar access) // ============================================================================= -/// Round-trip a BoolStruct using LeanCtorScalar trait. +/// Round-trip a BoolStruct via generated accessors. /// Lean layout: 1 obj field, then 3 Bool scalars. /// Total scalar size: 3 bytes. #[unsafe(no_mangle)] @@ -495,7 +495,7 @@ pub(crate) extern "C" fn rs_roundtrip_bool_struct( out } -/// Round-trip a MultiU32Struct using LeanCtorScalar trait. +/// Round-trip a MultiU32Struct via generated accessors. /// Lean layout: 1 obj field, then 3 UInt32 scalars. /// Total scalar size: 12 bytes. #[unsafe(no_mangle)] @@ -855,7 +855,7 @@ pub(crate) extern "C" fn rs_owned_prod_multiply(pair: LeanProd) -> Le Nat(fst.0 * snd.0).to_lean() } -/// Owned ScalarStruct: sum all scalar fields using LeanCtorScalar. +/// Owned ScalarStruct: sum all scalar fields via generated accessors. #[unsafe(no_mangle)] pub(crate) extern "C" fn rs_owned_scalar_sum(ptr: LeanScalarStruct) -> u64 { let u64val = ptr.get_num_64(0); From 87bbd8d66f03f5d022e440adf04d6e0ed3dbcf1c Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Thu, 16 Apr 2026 11:20:58 -0400 Subject: [PATCH 15/17] Drop variant wrapper types and lean_ctor! 
--- README.md | 173 ++++++++++++----------- src/lib.rs | 361 ++++++++++++++++++------------------------------ src/object.rs | 104 +++++++------- src/test_ffi.rs | 142 ++++++++----------- 4 files changed, 332 insertions(+), 448 deletions(-) diff --git a/README.md b/README.md index 0e1ee7a..255ff4d 100644 --- a/README.md +++ b/README.md @@ -3,15 +3,31 @@ A Rust library that wraps low-level bindings to the [`lean.h`](https://github.com/leanprover/lean4/blob/master/src/include/lean/lean.h) Lean C library with a high-level API for safe and ergonomic FFI from Lean to -Rust. This allows the user to focus on the actual Rust logic rather than -manually manipulating pointers and keeping track of Lean reference counts. +Rust. This allows the user to focus on the actual Rust logic rather than manual +pointer manipulation and keeping track of Lean reference counts. -The Rust bindings are auto-generated with +The raw Rust bindings are auto-generated with [`rust-bindgen`](https://github.com/rust-lang/rust-bindgen). Bindgen runs in `build.rs` and generates unsafe Rust functions that link to `lean.h`. This module can be found at `target/release/build/lean-ffi-/out/lean.rs` after running `cargo build --release`. +## Features + +- **RAII refcounting** for `LeanOwned` owned references via Rust `Clone` and + `Drop` +- **Lifetime bounds** for `LeanBorrowed` borrowed references to prevent + use-after-free +- **Thread-safe shared references** via `LeanShared` (`lean_mark_mt` + + `Send + Sync`). +- **Typed domain wrappers** e.g. `LeanArray`, `LeanString` etc. with safe Rust + methods +- **`Nat` conversions** via `num-bigint`, handling tagged scalars and big-ints +- **`lean_inductive!` macro** for Lean `structure` / `inductive` types, + generating easy accessor methods without manually tracking byte offsets +- **Safe external objects** via `ExternalClass::register_with_drop::()` and + borrow-bound `LeanExternal::get(&self) -> &T`. 
+ ## Background: Lean Ownership Model In Lean's C API, a **reference** is a `lean_object*` pointer to the header of a @@ -102,111 +118,115 @@ becomes `LeanPoint` in Rust. #### Defining custom domain types -Use the `lean_inductive!` macro to declare a ctor-backed domain type. It -generates a `#[repr(transparent)]` wrapper and attaches typed accessors -(`get_obj`/`set_obj`, `get_num_{64,32,16,8}`/`set_num_*`, `get_usize`/`set_usize`) -plus an `alloc()` constructor. Byte offsets are computed at compile time from -the declared layout; indices are bounds-checked. +`lean_inductive!` wraps a Lean `structure` or `inductive` with its full +per-constructor layout. It emits a `#[repr(transparent)]` newtype, a +[`LeanCtorLayout`] impl whose `LAYOUTS` slice has one entry per constructor +(indexed by tag), and typed accessors that bounds-check against the current +variant's layout: -**Structure** (one constructor, tag 0): +- `alloc(tag: u8)` — `lean_alloc_ctor` for a specific variant. +- `get_obj(i)` / `set_obj(i, val)` — object fields. +- `get_usize(i)` / `set_usize(i, val)` — `USize` fields. +- `get_num_{64,32,16,8}(i)` / `set_num_{64,32,16,8}(i, val)` — scalar fields. -```rust -lean_ffi::lean_inductive! { - /// Lean `PutResponse` — structure PutResponse where message : String; hash : String - LeanPutResponse { num_obj: 2 } -} +Variant layouts are listed inside `[ … ]`, in tag order. -impl LeanPutResponse { - pub fn mk(message: &str, hash: &str) -> Self { - let obj = Self::alloc(); - obj.set_obj(0, LeanString::new(message)); - obj.set_obj(1, LeanString::new(hash)); - obj +**Structure** — from `structure Point where x : Nat; y : Nat`: + +```rust +lean_ffi::lean_inductive! { LeanPoint [ { num_obj: 2 } ] } + +impl LeanPoint { + pub fn mk(x: LeanNat, y: LeanNat) -> Self { + let p = Self::alloc(0); + p.set_obj(0, x); + p.set_obj(1, y); + p } } ``` -**Multi-variant inductive** — declare the top-level wrapper and each variant -in one call. 
`From> for Top` is generated for -ergonomic construction: +**Inductive** — from: + +```lean +inductive CompareResult + | matched + | mismatch (leanSize rustSize : UInt64) + | notFound +``` ```rust lean_ffi::lean_inductive! { - /// Lean `CompareResult` — | matched | mismatch (l r : UInt64) | notFound - LeanCompareResult { - LeanCompareMatched { tag: 0 }, - LeanCompareMismatch { tag: 1, num_64: 2 }, - LeanCompareNotFound { tag: 2 }, - } + LeanCompareResult [ + { }, // matched + { num_64: 2 }, // mismatch + { }, // notFound + ] } -// Build side: -let m = LeanCompareMismatch::alloc(); +// Build: +let m = LeanCompareResult::alloc(1); m.set_num_64(0, lean_size); m.set_num_64(1, rust_size); -let result: LeanCompareResult = m.into(); -// Read side: dispatch on ctor.tag(), then construct the variant wrapper: +// Read — dispatch on the Lean tag, then access fields: match result.as_ctor().tag() { 1 => { - let m = LeanCompareMismatch::from_ctor(result.as_ctor()); - let (l, r) = (m.get_num_64(0), m.get_num_64(1)); + let (l, r) = (result.get_num_64(0), result.get_num_64(1)); } - _ => { /* matched or not found */ } + _ => { /* matched | notFound */ } } ``` -For bare domain types without a ctor layout (opaque external objects, etc.) -use the lower-level `lean_domain_type!` macro, which generates just the wrapper -and does not attach a layout. +For wrappers without a ctor layout (opaque externals, types represented as +`lean_box(n)`, etc.) use the lower-level `lean_domain_type!` macro. -### Inductive types and field layout +[`LeanCtorLayout`]: https://docs.rs/lean-ffi/latest/lean_ffi/object/trait.LeanCtorLayout.html -Extra care must be taken when dealing with -[inductive types](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) -as the runtime memory layout of constructor fields may not match the declaration -order in Lean. Fields are reordered into three groups: +### Constructor field layout -1. 
Non-scalar fields (`lean_object*`), in declaration order -2. `USize` fields, in declaration order -3. Other scalar fields, in decreasing order by size, then declaration order - within each size +Lean +[reorders constructor fields](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) +at runtime. Declaration order does **not** match memory order. For every +constructor, fields are laid out in this order: -This means a structure like +1. Object fields (`lean_object*`) — declaration order. +2. `USize` fields — declaration order. +3. Fixed-size scalars — descending size (8B → 4B → 2B → 1B), then declaration + order within each size. + +So for ```lean structure MyStruct where - u8val : UInt8 - obj : Nat + u8val : UInt8 + obj : Nat u32val : UInt32 u64val : UInt64 ``` -would be laid out as `[obj, u64val, u32val, u8val]` at runtime. Trivial wrapper -types (e.g. `Char` wraps `UInt32`) count as their underlying scalar type. +the runtime order is `[obj, u64val, u32val, u8val]`. Trivial wrappers (e.g. +`Char` over `UInt32`) count as their underlying scalar. -A constructor's memory looks like: +Memory: ``` -[header (8B)] [object fields (8B each)] [USize fields (8B each)] [scalar data area] +[header 8B] [object fields, 8B each] [USize fields, 8B each] [scalar bytes, descending size] ``` -Object fields and USize fields each occupy 8-byte slots. The scalar data area is -a flat region of bytes containing all remaining scalar field values, packed by -descending size. 
For `MyStruct` (1 object field, 0 USize fields, 13 scalar -bytes): +For `MyStruct` (`num_obj=1`, `num_usize=0`, `num_64=1`, `num_32=1`, `num_8=1`): -- `u64val` occupies bytes 0–7 of the scalar area -- `u32val` occupies bytes 8–11 -- `u8val` occupies byte 12 +- `u64val` at scalar bytes 0–7 +- `u32val` at scalar bytes 8–11 +- `u8val` at scalar byte 12 -Declare the field counts per size (object / USize / 64 / 32 / 16 / 8) and the -`lean_inductive!` macro computes offsets and generates size-indexed accessors: +`lean_inductive!` takes the per-size field counts and hands you size-indexed +accessors — `get_num_64(0)` for the first 8-byte scalar, `get_num_8(0)` for the +first 1-byte scalar, etc. No hand-rolled byte offsets: ```rust lean_ffi::lean_inductive! { - /// Lean `MyStruct` — u8val, obj, u32val, u64val (declaration order) - LeanMyStruct { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } + LeanMyStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] } impl LeanMyStruct { @@ -215,26 +235,11 @@ impl LeanMyStruct { pub fn u32val(&self) -> u32 { self.get_num_32(0) } pub fn u8val(&self) -> u8 { self.get_num_8(0) } } - -impl LeanMyStruct { - pub fn mk(obj: LeanNat, u64val: u64, u32val: u32, u8val: u8) -> Self { - let out = Self::alloc(); - out.set_obj(0, obj); - out.set_num_64(0, u64val); - out.set_num_32(0, u32val); - out.set_num_8(0, u8val); - out - } -} ``` -Indices are per size (first `num_64`, first `num_32`, …) and in Lean's -declaration order within each size. No hand-rolled byte offsets. - -If you need raw, unchecked access (e.g. for non-standard layouts) the -`LeanCtor` type exposes `get_u{8,16,32,64}(offset)` / `set_u{8,16,32,64}(offset, val)` -with absolute byte offsets matching Lean's `lean_ctor_get_uint*` / -`lean_ctor_set_uint*` C API. +For raw access (non-standard layouts, hand-tuned code), `LeanCtor` exposes +`get_u{8,16,32,64}(offset)` / `set_u{8,16,32,64}(offset, val)` with absolute +byte offsets matching `lean_ctor_get_uint*` / `lean_ctor_set_uint*`. 
### External objects (`LeanExternal`) diff --git a/src/lib.rs b/src/lib.rs index b549154..ec55fd2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -54,24 +54,22 @@ pub fn inc_heartbeat() { /// Must only be used as a `lean_external_foreach_fn` callback. pub unsafe extern "C" fn noop_foreach(_: *mut c_void, _: *mut include::lean_object) {} -/// Generate a `#[repr(transparent)]` newtype over a `LeanRef` type parameter -/// for a specific Lean type, with Clone, conditional Copy, `as_ctor`, `from_ctor`, -/// `new`, `into_raw`, and `From> for LeanOwned` impls. +/// Declare a `#[repr(transparent)]` wrapper `Ty` for a Lean domain +/// type, with `inner`, `as_raw`, `as_ctor`, `from_ctor`, `new`, `into_raw`, +/// `Clone`, conditional `Copy`, and `From> for LeanOwned`. /// -/// This is the low-level building block for bare domain types (external -/// objects, types without a ctor layout, or types whose layout is attached -/// separately). For ctor-backed structures and inductives, prefer -/// [`lean_inductive!`] — it calls this macro internally and also attaches -/// the layout + accessor methods in one declaration. +/// This is the low-level primitive for wrappers that are **not** ctor-backed +/// — opaque externals, types represented by a tagged scalar (`lean_box(n)`), +/// or wrappers whose layout is attached from a different module. For Lean +/// `structure` / `inductive` types, use [`lean_inductive!`] instead; it calls +/// this macro and also attaches the layout and typed accessors. /// -/// # Naming convention -/// -/// Domain types should be prefixed with `Lean` to distinguish them from Lean-side -/// types and to match the built-in types (`LeanArray`, `LeanString`, `LeanNat`, etc.). +/// Wrapper names are prefixed with `Lean` to match the built-ins (`LeanArray`, +/// `LeanString`, `LeanNat`, …). /// /// ```ignore /// lean_domain_type! { -/// /// Lean `RustData` — opaque external object +/// /// Rust handle wrapped as an opaque Lean `@[extern_type]`. 
/// LeanRustData; /// } /// ``` @@ -140,76 +138,116 @@ macro_rules! lean_domain_type { )*}; } -/// Attach a single `lean_ctor_object` layout to a [`lean_domain_type!`] wrapper. +/// Declare a wrapper for a Lean `structure` or `inductive` with its full +/// per-constructor layout. +/// +/// Emits a [`lean_domain_type!`] wrapper, an [`LeanCtorLayout`](object::LeanCtorLayout) +/// impl whose `LAYOUTS` slice has one entry per Lean constructor (indexed by +/// tag), plus these inherent methods: /// -/// Implements [`LeanCtorLayout<1>`](object::LeanCtorLayout) and generates -/// inherent `alloc()`, `ctor_tag()`, `get_obj` / `set_obj`, `get_usize` / -/// `set_usize`, and `get_num_{64,32,16,8}` / `set_num_{64,32,16,8}` methods. -/// Indices are bounds-checked against the declared counts and all byte offsets -/// are const-computed from the layout. +/// - `alloc(tag: u8)` — `lean_alloc_ctor` for the given variant. +/// - `get_obj(i)` / `set_obj(i, val)` +/// - `get_usize(i)` / `set_usize(i, val)` +/// - `get_num_{64,32,16,8}(i)` / `set_num_{64,32,16,8}(i, val)` /// -/// Within each scalar size (8B / 4B / 2B / 1B), fields follow Lean's -/// declaration order. Tag defaults to 0; pass `tag: N` for non-zero variants. +/// Accessors read the object's actual tag from its header, look up +/// `LAYOUTS[tag]`, bounds-check the field index against that variant's +/// counts, and compute byte offsets. Within each scalar size (8B / 4B / 2B / +/// 1B), field indices follow Lean's declaration order. /// -/// Most callers should use [`lean_inductive!`] instead — it composes -/// [`lean_domain_type!`] + `lean_ctor!` in one declaration. Reach for -/// `lean_ctor!` directly only if the domain type is declared separately (e.g. -/// in another module) or if you want to attach the layout after the fact. +/// A `structure` is the one-variant case. Typical call, using the `Point` +/// from `structure Point where x : Nat; y : Nat`: /// /// ```ignore -/// lean_domain_type! 
{ LeanFoo; } -/// lean_ctor!(LeanFoo { num_obj: 1, num_64: 2 }); +/// lean_inductive! { +/// /// Lean `Point` — see `Tests/Gen.lean`. +/// LeanPoint [ { num_obj: 2 } ] +/// } /// -/// let foo = LeanFoo::alloc(); -/// foo.set_obj(0, some_val); -/// foo.set_num_64(0, 42); +/// let p = LeanPoint::alloc(0); +/// p.set_obj(0, x_nat); +/// p.set_obj(1, y_nat); /// ``` +/// +/// Multi-variant — wrapping this Lean inductive: +/// +/// ```ignore +/// // Lean side: +/// // inductive BlockCompareResult +/// // | matched +/// // | mismatch (leanSize rustSize firstDiff : UInt64) +/// // | notFound +/// +/// lean_inductive! { +/// LeanBlockCompareResult [ +/// { }, // matched +/// { num_64: 3 }, // mismatch +/// { }, // notFound +/// ] +/// } +/// +/// // Build: +/// let m = LeanBlockCompareResult::alloc(1); +/// m.set_num_64(0, lean_size); +/// m.set_num_64(1, rust_size); +/// m.set_num_64(2, first_diff); +/// +/// // Read: +/// match result.as_ctor().tag() { +/// 1 => { +/// let (l, r, d) = (result.get_num_64(0), result.get_num_64(1), result.get_num_64(2)); +/// } +/// _ => { /* matched | notFound */ } +/// } +/// ``` +/// +/// `alloc(tag)` and every accessor panic if the index is out of range for +/// the current variant. #[macro_export] -macro_rules! lean_ctor { - ($ty:ident { $($key:ident : $val:expr),* $(,)? }) => { - impl $crate::object::LeanCtorLayout<1> for $ty { - const LAYOUTS: [$crate::object::SingleCtorLayout; 1] = [ - $crate::object::SingleCtorLayout { - $($key: $val,)* - ..$crate::object::SingleCtorLayout::ZERO - } +macro_rules! lean_inductive { + ( + $(#[$top_meta:meta])* + $top:ident [ + $( { $($key:ident : $val:expr),* $(,)? } ),+ $(,)? + ] + ) => { + $crate::lean_domain_type! 
{ $(#[$top_meta])* $top; } + + impl $crate::object::LeanCtorLayout for $top { + const LAYOUTS: &'static [$crate::object::SingleCtorLayout] = &[ + $( + $crate::object::SingleCtorLayout { + $($key: $val,)* + ..$crate::object::SingleCtorLayout::ZERO + }, + )+ ]; } - impl $ty { + impl $top { + /// Layout of the variant this object currently holds (read from the + /// ctor's tag in its object header). #[doc(hidden)] - const __LAYOUT: $crate::object::SingleCtorLayout = - >::LAYOUTS[0]; - - /// Constructor tag this wrapper represents. #[inline] - pub const fn ctor_tag() -> u8 { - Self::__LAYOUT.tag + fn __variant_layout(&self) -> $crate::object::SingleCtorLayout { + let tag = self.as_ctor().tag() as usize; + ::LAYOUTS[tag] } /// Get a borrowed reference to the `i`-th object field. pub fn get_obj(&self, i: usize) -> $crate::object::LeanBorrowed<'_> { - assert!( - i < Self::__LAYOUT.num_obj, - "object field index {i} out of bounds (num_obj = {})", - Self::__LAYOUT.num_obj, - ); + let l = self.__variant_layout(); + assert!(i < l.num_obj, "object field {i} out of bounds (num_obj = {})", l.num_obj); let raw = unsafe { - $crate::include::lean_ctor_get( - self.as_raw(), - $crate::object::to_u32(i), - ) + $crate::include::lean_ctor_get(self.as_raw(), $crate::object::to_u32(i)) }; unsafe { $crate::object::LeanBorrowed::from_raw(raw) } } /// Set the `i`-th object field. Takes ownership of `val`. pub fn set_obj(&self, i: usize, val: impl Into<$crate::object::LeanOwned>) { - assert!( - i < Self::__LAYOUT.num_obj, - "object field index {i} out of bounds (num_obj = {})", - Self::__LAYOUT.num_obj, - ); + let l = self.__variant_layout(); + assert!(i < l.num_obj, "object field {i} out of bounds (num_obj = {})", l.num_obj); let val: $crate::object::LeanOwned = val.into(); unsafe { $crate::include::lean_ctor_set( @@ -221,200 +259,73 @@ macro_rules! 
lean_ctor { } pub fn get_usize(&self, i: usize) -> usize { - assert!( - i < Self::__LAYOUT.num_usize, - "USize field index {i} out of bounds (num_usize = {})", - Self::__LAYOUT.num_usize, - ); + let l = self.__variant_layout(); + assert!(i < l.num_usize, "USize field {i} out of bounds (num_usize = {})", l.num_usize); self.as_ctor().get_usize(i) } pub fn set_usize(&self, i: usize, val: usize) { - assert!( - i < Self::__LAYOUT.num_usize, - "USize field index {i} out of bounds (num_usize = {})", - Self::__LAYOUT.num_usize, - ); + let l = self.__variant_layout(); + assert!(i < l.num_usize, "USize field {i} out of bounds (num_usize = {})", l.num_usize); self.as_ctor().set_usize(i, val) } pub fn get_num_64(&self, i: usize) -> u64 { - assert!( - i < Self::__LAYOUT.num_64, - "64-bit field index {i} out of bounds (num_64 = {})", - Self::__LAYOUT.num_64, - ); - self.as_ctor().get_u64(Self::__LAYOUT.offset_64(i)) + let l = self.__variant_layout(); + assert!(i < l.num_64, "64-bit field {i} out of bounds (num_64 = {})", l.num_64); + self.as_ctor().get_u64(l.offset_64(i)) } pub fn set_num_64(&self, i: usize, val: u64) { - assert!( - i < Self::__LAYOUT.num_64, - "64-bit field index {i} out of bounds (num_64 = {})", - Self::__LAYOUT.num_64, - ); - self.as_ctor().set_u64(Self::__LAYOUT.offset_64(i), val) + let l = self.__variant_layout(); + assert!(i < l.num_64, "64-bit field {i} out of bounds (num_64 = {})", l.num_64); + self.as_ctor().set_u64(l.offset_64(i), val) } pub fn get_num_32(&self, i: usize) -> u32 { - assert!( - i < Self::__LAYOUT.num_32, - "32-bit field index {i} out of bounds (num_32 = {})", - Self::__LAYOUT.num_32, - ); - self.as_ctor().get_u32(Self::__LAYOUT.offset_32(i)) + let l = self.__variant_layout(); + assert!(i < l.num_32, "32-bit field {i} out of bounds (num_32 = {})", l.num_32); + self.as_ctor().get_u32(l.offset_32(i)) } pub fn set_num_32(&self, i: usize, val: u32) { - assert!( - i < Self::__LAYOUT.num_32, - "32-bit field index {i} out of bounds (num_32 = 
{})", - Self::__LAYOUT.num_32, - ); - self.as_ctor().set_u32(Self::__LAYOUT.offset_32(i), val) + let l = self.__variant_layout(); + assert!(i < l.num_32, "32-bit field {i} out of bounds (num_32 = {})", l.num_32); + self.as_ctor().set_u32(l.offset_32(i), val) } pub fn get_num_16(&self, i: usize) -> u16 { - assert!( - i < Self::__LAYOUT.num_16, - "16-bit field index {i} out of bounds (num_16 = {})", - Self::__LAYOUT.num_16, - ); - self.as_ctor().get_u16(Self::__LAYOUT.offset_16(i)) + let l = self.__variant_layout(); + assert!(i < l.num_16, "16-bit field {i} out of bounds (num_16 = {})", l.num_16); + self.as_ctor().get_u16(l.offset_16(i)) } pub fn set_num_16(&self, i: usize, val: u16) { - assert!( - i < Self::__LAYOUT.num_16, - "16-bit field index {i} out of bounds (num_16 = {})", - Self::__LAYOUT.num_16, - ); - self.as_ctor().set_u16(Self::__LAYOUT.offset_16(i), val) + let l = self.__variant_layout(); + assert!(i < l.num_16, "16-bit field {i} out of bounds (num_16 = {})", l.num_16); + self.as_ctor().set_u16(l.offset_16(i), val) } pub fn get_num_8(&self, i: usize) -> u8 { - assert!( - i < Self::__LAYOUT.num_8, - "8-bit field index {i} out of bounds (num_8 = {})", - Self::__LAYOUT.num_8, - ); - self.as_ctor().get_u8(Self::__LAYOUT.offset_8(i)) + let l = self.__variant_layout(); + assert!(i < l.num_8, "8-bit field {i} out of bounds (num_8 = {})", l.num_8); + self.as_ctor().get_u8(l.offset_8(i)) } pub fn set_num_8(&self, i: usize, val: u8) { - assert!( - i < Self::__LAYOUT.num_8, - "8-bit field index {i} out of bounds (num_8 = {})", - Self::__LAYOUT.num_8, - ); - self.as_ctor().set_u8(Self::__LAYOUT.offset_8(i), val) + let l = self.__variant_layout(); + assert!(i < l.num_8, "8-bit field {i} out of bounds (num_8 = {})", l.num_8); + self.as_ctor().set_u8(l.offset_8(i), val) } } - impl $ty<$crate::object::LeanOwned> { - /// Allocate a new constructor with this type's layout. 
- pub fn alloc() -> Self { - const L: $crate::object::SingleCtorLayout = - <$ty<$crate::object::LeanOwned> as $crate::object::LeanCtorLayout<1>>::LAYOUTS[0]; - Self::new($crate::object::LeanCtor::alloc(L.tag, L.num_obj, L.scalar_size()).into()) + impl $top<$crate::object::LeanOwned> { + /// Allocate a new constructor object for the given tag (variant). + /// + /// Panics if `tag` is out of range for this type's `LAYOUTS`. + pub fn alloc(tag: u8) -> Self { + let layouts = ::LAYOUTS; + let layout = layouts[tag as usize]; + Self::new( + $crate::object::LeanCtor::alloc(tag, layout.num_obj, layout.scalar_size()) + .into(), + ) } } }; } - -/// Declare a Lean structure or multi-variant inductive and all its field -/// layouts in one shot. -/// -/// **Structure form** (one layout, tag 0): -/// ```ignore -/// lean_inductive! { -/// LeanPoint { num_obj: 2 } -/// } -/// ``` -/// -/// **Multi-variant form** (one wrapper per variant + top-level dispatch type): -/// ```ignore -/// lean_inductive! { -/// LeanCompareResult { -/// LeanCompareMatched { tag: 0 }, -/// LeanCompareMismatch { tag: 1, num_64: 3 }, -/// LeanCompareNotFound { tag: 2 }, -/// } -/// } -/// -/// // Read side: -/// match result.as_ctor().tag() { -/// 1 => { -/// let m = LeanCompareMismatch::from_ctor(result.as_ctor()); -/// let first_diff = m.get_num_64(2); -/// } -/// _ => { /* matched or not found */ } -/// } -/// -/// // Write side: -/// let m = LeanCompareMismatch::alloc(); -/// m.set_num_64(0, lean_size); -/// let result: LeanCompareResult = m.into(); -/// ``` -/// -/// The form is disambiguated by the token after the first inner ident: `:` -/// (key-value pair) means structure, `{` (brace group) means variant. Variants -/// must be listed in tag order (tag 0 first, dense) and their names must differ -/// from the top-level name. -/// -/// This macro composes [`lean_domain_type!`] + [`lean_ctor!`]. Use those -/// directly if you need to split the domain-type declaration from the layout -/// (e.g. 
to declare wrappers that aren't ctors, or to attach the layout from a -/// different module). -#[macro_export] -macro_rules! lean_inductive { - // --- Structure form: LeanFoo { num_obj: 1, num_64: 2 } --- - // - // Distinguished from the multi-variant arm by the trailing `:` after the - // first inner ident (vs. `{` for the multi-variant case). - ( - $(#[$top_meta:meta])* - $top:ident { $($key:ident : $val:expr),* $(,)? } - ) => { - $crate::lean_domain_type! { $(#[$top_meta])* $top; } - $crate::lean_ctor!($top { $($key : $val),* }); - }; - - // --- Multi-variant form: LeanFoo { Variant1 { ... }, Variant2 { ... } } --- - ( - $(#[$top_meta:meta])* - $top:ident { - $( - $(#[$var_meta:meta])* - $variant:ident { $($key:ident : $val:expr),* $(,)? } - ),+ $(,)? - } - ) => { - $crate::lean_domain_type! { - $(#[$top_meta])* $top; - $( $(#[$var_meta])* $variant; )+ - } - $( $crate::lean_ctor!($variant { $($key : $val),* }); )+ - - impl - $crate::object::LeanCtorLayout<{ [ $( stringify!($variant) ),+ ].len() }> - for $top - { - const LAYOUTS: [ - $crate::object::SingleCtorLayout; - [ $( stringify!($variant) ),+ ].len() - ] = [ - $( - <$variant<$crate::object::LeanOwned> - as $crate::object::LeanCtorLayout<1>>::LAYOUTS[0], - )+ - ]; - } - - $( - impl From<$variant<$crate::object::LeanOwned>> - for $top<$crate::object::LeanOwned> - { - #[inline] - fn from(v: $variant<$crate::object::LeanOwned>) -> Self { - Self::new(v.into()) - } - } - )+ - }; -} diff --git a/src/object.rs b/src/object.rs index f0d98ef..1497534 100644 --- a/src/object.rs +++ b/src/object.rs @@ -985,25 +985,25 @@ impl From> for LeanOwned { /// the object field count is read from the header and added internally, so /// `get_usize(0)` reads the first `USize` field. 
/// -/// The [`lean_ctor!`](crate::lean_ctor) and [`lean_inductive!`](crate::lean_inductive) -/// macros compute these byte offsets automatically from declared field counts -/// and attach typed accessor methods, so users don't need to calculate offsets -/// manually. Reading all fields of `Foo` in C and via the generated accessors: +/// Reading all fields of `Foo` in C vs. via [`lean_inductive!`](crate::lean_inductive) +/// (which computes byte offsets from declared field counts): /// /// ```c -/// // C — all offsets are from lean_ctor_obj_cptr -/// lean_object* name = lean_ctor_get(o, 0); // array index 0 -/// size_t size = lean_ctor_get_usize(o, 1); // array index 1 (num_objs + 0) -/// uint64_t count = lean_ctor_get_uint64(o, 16); // byte offset (1 obj + 1 usize) * 8 -/// uint8_t flag = lean_ctor_get_uint8(o, 24); // byte offset 16 + sizeof(uint64_t) +/// // C — `lean_ctor_get`/`get_usize` take a pointer-sized field index; +/// // `lean_ctor_get_uint*` takes an absolute byte offset from `lean_ctor_obj_cptr`. +/// lean_object* name = lean_ctor_get(o, 0); // object field 0 +/// size_t size = lean_ctor_get_usize(o, 1); // USize field at index (num_objs + 0) +/// uint64_t count = lean_ctor_get_uint64(o, 16); // (1 obj + 1 usize) * 8 +/// uint8_t flag = lean_ctor_get_uint8(o, 24); // 16 + sizeof(uint64_t) /// ``` /// /// ```ignore -/// // Rust — generated by lean_ctor! / lean_inductive! (no manual offsets) -/// let name = foo.get_obj(0); // object field -/// let size = foo.get_usize(0); // first USize field -/// let count = foo.get_num_64(0); // first 8-byte scalar -/// let flag = foo.get_num_8(0); // first 1-byte scalar +/// // Rust — from a `lean_inductive! 
{ LeanFoo [ { num_obj: 1, num_usize: 1, +/// // num_64: 1, num_8: 1 } ] }`: +/// let name = foo.get_obj(0); +/// let size = foo.get_usize(0); +/// let count = foo.get_num_64(0); +/// let flag = foo.get_num_8(0); /// ``` #[repr(transparent)] pub struct LeanCtor(R); @@ -1190,20 +1190,17 @@ impl From> for LeanOwned { // LeanCtorLayout — layout metadata for structures and inductive variants // ============================================================================= -/// Layout of a single `lean_ctor_object`: its tag and field counts per -/// scalar type size. Describes both structures (one layout, tag 0) and one -/// variant of a multi-variant inductive (tag N). +/// Memory layout of a single Lean constructor (one `structure` or one variant +/// of an `inductive`): counts of object, `USize`, and 64/32/16/8-bit scalar +/// fields. /// -/// Scalar fields are ordered after object/USize fields by **descending size** -/// (8B, 4B, 2B, 1B). Within a given size, Lean preserves declaration order. +/// Fields are laid out in the order: object → `USize` → descending scalar size +/// (8B → 4B → 2B → 1B). Within a size, Lean preserves declaration order. /// -/// Use [`lean_ctor!`](crate::lean_ctor) to attach this layout to a structure -/// or to a per-variant wrapper type; use [`lean_inductive!`](crate::lean_inductive) -/// to wire variant wrappers to the top-level inductive type. +/// The tag is not stored here — it's the index of this layout in the owning +/// type's [`LeanCtorLayout::LAYOUTS`] slice. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct SingleCtorLayout { - /// Constructor tag. Lean's `LEAN_MAX_CTOR_TAG` fits in `u8`. 
- pub tag: u8, pub num_obj: usize, pub num_usize: usize, pub num_64: usize, @@ -1214,7 +1211,6 @@ pub struct SingleCtorLayout { impl SingleCtorLayout { pub const ZERO: Self = Self { - tag: 0, num_obj: 0, num_usize: 0, num_64: 0, @@ -1223,7 +1219,8 @@ impl SingleCtorLayout { num_8: 0, }; - /// Total byte size of the scalar region (USize + 64 + 32 + 16 + 8 groups). + /// `scalar_sz` argument for `lean_alloc_ctor`: total bytes needed for all + /// fixed-size scalar fields after the object and `USize` fields. #[inline] pub const fn scalar_size(&self) -> usize { self.num_usize * size_of::() @@ -1233,8 +1230,8 @@ impl SingleCtorLayout { + self.num_8 } - /// Byte offset from `lean_ctor_obj_cptr` where fixed-size scalar fields begin - /// (after object fields and USize fields). + /// Byte offset from `lean_ctor_obj_cptr` to the first fixed-size scalar + /// field, skipping the preceding object and `USize` fields. #[inline] pub const fn scalar_base(&self) -> usize { (self.num_obj + self.num_usize) * size_of::() @@ -1258,15 +1255,15 @@ impl SingleCtorLayout { } } -/// Declarative layout metadata for ctor-backed domain types. +/// Per-constructor layouts for a Lean `structure` or `inductive`. /// -/// - Structures and per-variant wrappers implement `LeanCtorLayout<1>` via -/// [`lean_ctor!`](crate::lean_ctor) (the wrapper represents a single -/// `lean_ctor_object` layout). -/// - Top-level multi-variant inductives implement `LeanCtorLayout` where -/// `N` is the variant count, via [`lean_inductive!`](crate::lean_inductive). -pub trait LeanCtorLayout { - const LAYOUTS: [SingleCtorLayout; N]; +/// `LAYOUTS[tag]` is the [`SingleCtorLayout`] for the constructor with that +/// Lean tag. A `structure` yields a 1-element slice (tag 0); an `inductive` +/// yields one entry per variant in declaration order. +/// +/// Implemented automatically by [`lean_inductive!`](crate::lean_inductive). 
+pub trait LeanCtorLayout { + const LAYOUTS: &'static [SingleCtorLayout]; } // ============================================================================= @@ -2085,16 +2082,14 @@ mod layout_tests { #[test] fn zero_layout_has_zero_size() { let z = SingleCtorLayout::ZERO; - assert_eq!(z.tag, 0); assert_eq!(z.num_obj, 0); assert_eq!(z.scalar_size(), 0); assert_eq!(z.scalar_base(), 0); } #[test] - fn scalar_size_sums_all_groups() { + fn scalar_size_sums_all_scalar_sizes() { let l = SingleCtorLayout { - tag: 0, num_obj: 0, num_usize: 2, num_64: 3, @@ -2113,7 +2108,7 @@ mod layout_tests { num_usize: 2, ..SingleCtorLayout::ZERO }; - // 3 obj slots + 2 usize slots, all pointer-width + // 3 object fields + 2 USize fields, all pointer-width assert_eq!(l.scalar_base(), 5 * size_of::()); } @@ -2126,62 +2121,57 @@ mod layout_tests { num_32: 2, num_16: 1, num_8: 3, - tag: 0, }; let base = l.scalar_base(); - // 64-bit group starts at `base`. + // 8-byte scalars start at `base`. assert_eq!(l.offset_64(0), base); assert_eq!(l.offset_64(1), base + 8); - // 32-bit group starts after 2 u64s. + // 4-byte scalars start after 2 u64s. assert_eq!(l.offset_32(0), base + 16); assert_eq!(l.offset_32(1), base + 16 + 4); - // 16-bit group starts after 2 u64s + 2 u32s. + // 2-byte scalars start after 2 u64s + 2 u32s. assert_eq!(l.offset_16(0), base + 16 + 8); - // 8-bit group starts after 2 u64s + 2 u32s + 1 u16. + // 1-byte scalars start after 2 u64s + 2 u32s + 1 u16. assert_eq!(l.offset_8(0), base + 16 + 8 + 2); assert_eq!(l.offset_8(2), base + 16 + 8 + 2 + 2); } #[test] - fn offsets_handle_empty_groups() { - // u64 + u8, skipping u32/u16 entirely. + fn offsets_skip_absent_scalar_sizes() { + // u64 + u8, no u32/u16 fields. let l = SingleCtorLayout { num_64: 1, num_8: 1, ..SingleCtorLayout::ZERO }; assert_eq!(l.offset_64(0), 0); - // u8 sits immediately after the u64: 32/16 groups are zero-width. + // u8 sits immediately after the u64: no u32/u16 to skip. 
assert_eq!(l.offset_8(0), 8); assert_eq!(l.scalar_size(), 8 + 1); } #[test] - fn layout_is_copy_in_const_context() { + fn layout_slice_is_usable_in_const_context() { // Confirms SingleCtorLayout can be read through LeanCtorLayout in const // code — this is what the generated accessor methods rely on. struct Marker; - impl LeanCtorLayout<2> for Marker { - const LAYOUTS: [SingleCtorLayout; 2] = [ + impl LeanCtorLayout for Marker { + const LAYOUTS: &'static [SingleCtorLayout] = &[ SingleCtorLayout { - tag: 0, num_obj: 1, ..SingleCtorLayout::ZERO }, SingleCtorLayout { - tag: 1, num_64: 2, ..SingleCtorLayout::ZERO }, ]; } - const L0: SingleCtorLayout = >::LAYOUTS[0]; - const L1: SingleCtorLayout = >::LAYOUTS[1]; + const L0: SingleCtorLayout = ::LAYOUTS[0]; + const L1: SingleCtorLayout = ::LAYOUTS[1]; const OFF: usize = L1.offset_64(1); - assert_eq!(L0.tag, 0); assert_eq!(L0.num_obj, 1); - assert_eq!(L1.tag, 1); assert_eq!(L1.num_64, 2); assert_eq!(OFF, 8); } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index ecb85a8..79482e7 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -32,48 +32,45 @@ crate::lean_domain_type! { crate::lean_inductive! { /// Lean `ScalarStruct` — structure ScalarStruct where obj : Nat; u8val : UInt8; u32val : UInt32; u64val : UInt64 - LeanScalarStruct { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } + LeanScalarStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] } crate::lean_inductive! { /// Lean `ExtScalarStruct` — all scalar types - LeanExtScalarStruct { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } + LeanExtScalarStruct [ { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } ] } crate::lean_inductive! { /// Lean `USizeStruct` — structure USizeStruct where obj : Nat; uval : USize; u8val : UInt8 - LeanUSizeStruct { num_obj: 1, num_usize: 1, num_8: 1 } + LeanUSizeStruct [ { num_obj: 1, num_usize: 1, num_8: 1 } ] } crate::lean_inductive! 
{ /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool - LeanUSizeMixedStruct { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } + LeanUSizeMixedStruct [ { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } ] } crate::lean_inductive! { /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool - LeanBoolStruct { num_obj: 1, num_8: 3 } + LeanBoolStruct [ { num_obj: 1, num_8: 3 } ] } crate::lean_inductive! { /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 - LeanMultiU32Struct { num_obj: 1, num_32: 3 } + LeanMultiU32Struct [ { num_obj: 1, num_32: 3 } ] } crate::lean_inductive! { /// Lean `Outer` — structure containing ScalarStruct + String + UInt64 - LeanOuter { num_obj: 2, num_64: 1 } + LeanOuter [ { num_obj: 2, num_64: 1 } ] } crate::lean_inductive! { /// Lean `InductiveHolder` — structure containing TestInductive + UInt32 - LeanInductiveHolder { num_obj: 1, num_32: 1 } + LeanInductiveHolder [ { num_obj: 1, num_32: 1 } ] } crate::lean_inductive! { - /// Lean `TestInductive` — multi-variant inductive for tag dispatch testing. - /// tag 0 (empty, scalar-unboxed), tag 1 (2 u64), tag 2 (1 obj + 1 u32 + 1 bool). - LeanTestInductive { - /// tag 0 — no fields (scalar-unboxed by Lean) - LeanTestInductiveEmpty { tag: 0 }, - /// tag 1 — 2 u64 fields - LeanTestInductiveWithScalars { tag: 1, num_64: 2 }, - /// tag 2 — 1 obj + 1 u32 + 1 bool - LeanTestInductiveWithMixed { tag: 2, num_obj: 1, num_32: 1, num_8: 1 }, - } + /// Lean `TestInductive` — | empty | withScalars (a b : UInt64) | withMixed (obj : Nat) (x : UInt32) (flag : Bool) + /// Tag 0 is scalar-unboxed by Lean; tags 1–2 are ctor objects. + LeanTestInductive [ + { }, // tag 0 — empty + { num_64: 2 }, // tag 1 — withScalars (2 u64) + { num_obj: 1, num_32: 1, num_8: 1 }, // tag 2 — withMixed (1 obj + 1 u32 + 1 bool) + ] } /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). 
@@ -285,7 +282,7 @@ pub(crate) extern "C" fn rs_roundtrip_scalar_struct( let u32val = ptr.get_num_32(0); let u8val = ptr.get_num_8(0); - let out = LeanScalarStruct::alloc(); + let out = LeanScalarStruct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_num_64(0, u64val); out.set_num_32(0, u32val); @@ -416,7 +413,7 @@ pub(crate) extern "C" fn rs_roundtrip_ext_scalar_struct( let u16val = ptr.get_num_16(0); let u8val = ptr.get_num_8(0); - let out = LeanExtScalarStruct::alloc(); + let out = LeanExtScalarStruct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_num_64(0, u64val); out.set_num_64(1, fval.to_bits()); @@ -442,7 +439,7 @@ pub(crate) extern "C" fn rs_roundtrip_usize_struct( let uval = ptr.get_usize(0); let u8val = ptr.get_num_8(0); - let out = LeanUSizeStruct::alloc(); + let out = LeanUSizeStruct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_usize(0, uval); out.set_num_8(0, u8val); @@ -462,7 +459,7 @@ pub(crate) extern "C" fn rs_roundtrip_usize_mixed_struct( let u32val = ptr.get_num_32(0); let bval = ptr.get_num_8(0) != 0; - let out = LeanUSizeMixedStruct::alloc(); + let out = LeanUSizeMixedStruct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_usize(0, uval); out.set_num_64(0, u64val); @@ -487,7 +484,7 @@ pub(crate) extern "C" fn rs_roundtrip_bool_struct( let b2 = ptr.get_num_8(1); let b3 = ptr.get_num_8(2); - let out = LeanBoolStruct::alloc(); + let out = LeanBoolStruct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_num_8(0, b1); out.set_num_8(1, b2); @@ -507,7 +504,7 @@ pub(crate) extern "C" fn rs_roundtrip_multi_u32_struct( let b = ptr.get_num_32(1); let c = ptr.get_num_32(2); - let out = LeanMultiU32Struct::alloc(); + let out = LeanMultiU32Struct::alloc(0); out.set_obj(0, build_nat(&obj_nat)); out.set_num_32(0, a); out.set_num_32(1, b); @@ -532,33 +529,31 @@ pub(crate) extern "C" fn rs_roundtrip_test_inductive( return LeanTestInductive::new(ptr.inner().to_owned_ref()); } - let ctor = ptr.as_ctor(); - match ctor.tag() 
{ + let tag = ptr.as_ctor().tag(); + match tag { 1 => { - // withScalars: use the variant domain type - let src = LeanTestInductiveWithScalars::from_ctor(ctor); - let a = src.get_num_64(0); - let b = src.get_num_64(1); + // withScalars: 2 u64 fields + let a = ptr.get_num_64(0); + let b = ptr.get_num_64(1); - let dst = LeanTestInductiveWithScalars::alloc(); + let dst = LeanTestInductive::alloc(1); dst.set_num_64(0, a); dst.set_num_64(1, b); - dst.into() + dst } 2 => { - // withMixed: use the variant domain type - let src = LeanTestInductiveWithMixed::from_ctor(ctor); - let obj_nat = Nat::from_obj(&src.as_ctor().get(0)); - let x = src.get_num_32(0); - let flag = src.get_num_8(0); + // withMixed: 1 obj + 1 u32 + 1 bool + let obj_nat = Nat::from_obj(&ptr.get_obj(0)); + let x = ptr.get_num_32(0); + let flag = ptr.get_num_8(0); - let dst = LeanTestInductiveWithMixed::alloc(); + let dst = LeanTestInductive::alloc(2); dst.set_obj(0, build_nat(&obj_nat)); dst.set_num_32(0, x); dst.set_num_8(0, flag); - dst.into() + dst } - _ => unreachable!("Invalid TestInductive tag: {}", ctor.tag()), + _ => unreachable!("Invalid TestInductive tag: {tag}"), } } @@ -585,7 +580,7 @@ pub(crate) extern "C" fn rs_roundtrip_outer( let inner_u8 = inner_wrapped.get_num_8(0); let inner_obj = Nat::from_obj(&inner_wrapped.as_ctor().get(0)); - let new_inner = LeanScalarStruct::alloc(); + let new_inner = LeanScalarStruct::alloc(0); new_inner.set_obj(0, build_nat(&inner_obj)); new_inner.set_num_64(0, inner_u64); new_inner.set_num_32(0, inner_u32); @@ -595,7 +590,7 @@ pub(crate) extern "C" fn rs_roundtrip_outer( let new_label = LeanString::new(&label_ref.as_string().to_string()); // Write: construct new Outer - let out = LeanOuter::alloc(); + let out = LeanOuter::alloc(0); out.set_obj(0, new_inner); out.set_obj(1, new_label); out.set_num_64(0, count); @@ -616,7 +611,7 @@ pub(crate) extern "C" fn rs_roundtrip_inductive_holder( let new_value = rs_roundtrip_test_inductive(LeanTestInductive(value_ref)); // 
Write - let out = LeanInductiveHolder::alloc(); + let out = LeanInductiveHolder::alloc(0); out.set_obj(0, new_value); out.set_num_32(0, tag_copy); out @@ -1791,26 +1786,26 @@ mod layout_sanity { type Outer = super::LeanOuter; type Holder = super::LeanInductiveHolder; type Ind = super::LeanTestInductive; - type Empty = super::LeanTestInductiveEmpty; - type WScalars = super::LeanTestInductiveWithScalars; - type WMixed = super::LeanTestInductiveWithMixed; - fn layout>() -> SingleCtorLayout { - >::LAYOUTS[0] + /// The single layout for a structure (1-variant type). + fn struct_layout() -> SingleCtorLayout { + let layouts = ::LAYOUTS; + assert_eq!(layouts.len(), 1, "expected a 1-variant (structure) type"); + layouts[0] } #[test] fn scalar_struct_layout() { - let l = layout::(); - assert_eq!((l.tag, l.num_obj), (0, 1)); + let l = struct_layout::(); + assert_eq!(l.num_obj, 1); assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (1, 1, 0, 1)); assert_eq!(l.scalar_size(), 8 + 4 + 1); } #[test] fn ext_scalar_struct_layout() { - let l = layout::(); - assert_eq!((l.tag, l.num_obj), (0, 1)); + let l = struct_layout::(); + assert_eq!(l.num_obj, 1); assert_eq!((l.num_64, l.num_32, l.num_16, l.num_8), (2, 2, 1, 1)); assert_eq!(l.offset_32(0), l.scalar_base() + 16); assert_eq!(l.offset_8(0), l.scalar_base() + 16 + 8 + 2); @@ -1818,7 +1813,7 @@ mod layout_sanity { #[test] fn usize_struct_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!(l.num_usize, 1); assert_eq!(l.num_8, 1); // USize sits between obj fields and fixed-width scalars. 
@@ -1827,7 +1822,7 @@ mod layout_sanity { #[test] fn usize_mixed_struct_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!( (l.num_obj, l.num_usize, l.num_64, l.num_32, l.num_8), (1, 1, 1, 1, 1) @@ -1836,59 +1831,42 @@ mod layout_sanity { #[test] fn bool_struct_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!(l.num_8, 3); assert_eq!(l.offset_8(2), l.scalar_base() + 2); } #[test] fn multi_u32_struct_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!(l.num_32, 3); assert_eq!(l.offset_32(2), l.scalar_base() + 8); } #[test] fn outer_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!((l.num_obj, l.num_64), (2, 1)); } #[test] fn inductive_holder_layout() { - let l = layout::(); + let l = struct_layout::(); assert_eq!((l.num_obj, l.num_32), (1, 1)); } #[test] - fn inductive_variant_tags_match_positions() { - let layouts = >::LAYOUTS; - assert_eq!(layouts[0].tag, 0); - assert_eq!(layouts[1].tag, 1); - assert_eq!(layouts[2].tag, 2); - } - - #[test] - fn inductive_variant_layouts_match_variants() { - let layouts = >::LAYOUTS; - // empty: tag 0, no fields - assert_eq!(layouts[0], layout::()); - // withScalars: tag 1, 2 u64 - assert_eq!(layouts[1], layout::()); + fn inductive_variant_layouts() { + let layouts = ::LAYOUTS; + assert_eq!(layouts.len(), 3); + // tag 0: empty + assert_eq!(layouts[0], SingleCtorLayout::ZERO); + // tag 1: withScalars (2 u64) assert_eq!(layouts[1].num_64, 2); - // withMixed: tag 2, 1 obj + 1 u32 + 1 bool - assert_eq!(layouts[2], layout::()); + // tag 2: withMixed (1 obj + 1 u32 + 1 bool) assert_eq!( (layouts[2].num_obj, layouts[2].num_32, layouts[2].num_8), (1, 1, 1) ); } - - #[test] - fn ctor_tag_matches_layout_tag() { - assert_eq!(Empty::ctor_tag(), 0); - assert_eq!(WScalars::ctor_tag(), 1); - assert_eq!(WMixed::ctor_tag(), 2); - assert_eq!(Scalar::ctor_tag(), 0); - } } From 784e69cb5649266a9f83515271bb2c43b9b64435 Mon Sep 17 00:00:00 2001 From: samuelburnham 
<45365069+samuelburnham@users.noreply.github.com> Date: Thu, 16 Apr 2026 11:49:41 -0400 Subject: [PATCH 16/17] Update tests --- README.md | 18 ++----- Tests/FFI.lean | 9 ++++ Tests/Gen.lean | 12 +++++ src/test_ffi.rs | 126 ++++++++++++++++++++++++++++++++++++++---------- 4 files changed, 124 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 255ff4d..b1feca6 100644 --- a/README.md +++ b/README.md @@ -136,14 +136,9 @@ Variant layouts are listed inside `[ … ]`, in tag order. ```rust lean_ffi::lean_inductive! { LeanPoint [ { num_obj: 2 } ] } -impl LeanPoint { - pub fn mk(x: LeanNat, y: LeanNat) -> Self { - let p = Self::alloc(0); - p.set_obj(0, x); - p.set_obj(1, y); - p - } -} +let p = LeanPoint::alloc(0); +p.set_obj(0, x_nat); +p.set_obj(1, y_nat); ``` **Inductive** — from: @@ -228,13 +223,6 @@ first 1-byte scalar, etc. No hand-rolled byte offsets: lean_ffi::lean_inductive! { LeanMyStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] } - -impl LeanMyStruct { - pub fn obj(&self) -> LeanBorrowed<'_> { self.get_obj(0) } - pub fn u64val(&self) -> u64 { self.get_num_64(0) } - pub fn u32val(&self) -> u32 { self.get_num_32(0) } - pub fn u8val(&self) -> u8 { self.get_num_8(0) } -} ``` For raw access (non-standard layouts, hand-tuned code), `LeanCtor` exposes diff --git a/Tests/FFI.lean b/Tests/FFI.lean index 62ee27e..f131355 100644 --- a/Tests/FFI.lean +++ b/Tests/FFI.lean @@ -83,6 +83,9 @@ opaque roundtripOuter : @& Outer → Outer @[extern "rs_roundtrip_inductive_holder"] opaque roundtripInductiveHolder : @& InductiveHolder → InductiveHolder +@[extern "rs_roundtrip_struct_in_variant"] +opaque roundtripStructInVariant : @& StructInVariant → StructInVariant + @[extern "rs_roundtrip_float"] opaque roundtripFloat : Float → Float @@ -400,6 +403,12 @@ def borrowedRoundtripTests : TestSeq := test "InductiveHolder empty" (roundtripInductiveHolder ⟨.empty, 0⟩ == ⟨.empty, 0⟩) ++ test "InductiveHolder withScalars" (roundtripInductiveHolder ⟨.withScalars 10 
20, 42⟩ == ⟨.withScalars 10 20, 42⟩) ++ test "InductiveHolder withMixed" (roundtripInductiveHolder ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩ == ⟨.withMixed 7 99 true, 0xFFFFFFFF⟩) ++ + -- StructInVariant: inductive whose variants contain structures + test "StructInVariant empty" (roundtripStructInVariant .empty == .empty) ++ + test "StructInVariant withPoint zeros" (roundtripStructInVariant (.withPoint ⟨0, 0⟩) == .withPoint ⟨0, 0⟩) ++ + test "StructInVariant withPoint mixed" (roundtripStructInVariant (.withPoint ⟨42, 99⟩) == .withPoint ⟨42, 99⟩) ++ + test "StructInVariant withScalar zeros" (roundtripStructInVariant (.withScalar ⟨0, 0, 0, 0⟩ 0) == .withScalar ⟨0, 0, 0, 0⟩ 0) ++ + test "StructInVariant withScalar max" (roundtripStructInVariant (.withScalar ⟨1, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩ 0xFFFFFFFF) == .withScalar ⟨1, 0xFF, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF⟩ 0xFFFFFFFF) ++ test "External all fields" (externalAllFields (mkRustData 42 99 "hello") == "42:99:hello") ++ test "External all fields zeros" (externalAllFields (mkRustData 0 0 "") == "0:0:") ++ test "External large u64" (rustDataGetX (mkRustData 0xFFFFFFFFFFFFFFFF 0 "test") == 0xFFFFFFFFFFFFFFFF) ++ diff --git a/Tests/Gen.lean b/Tests/Gen.lean index 06b8ed6..6fcdfbe 100644 --- a/Tests/Gen.lean +++ b/Tests/Gen.lean @@ -178,6 +178,18 @@ structure InductiveHolder where tag_copy : UInt32 deriving Repr, BEq, DecidableEq, Inhabited +/-- Inductive whose variants carry structure-typed fields. + Tests that variant accessors work when the variant's field is itself a + ctor-backed Lean structure. + - empty: tag 0, no fields (scalar representation) + - withPoint: tag 1, 1 object field (a `Point`) + - withScalar: tag 2, 1 object field (a `ScalarStruct`) + 1 UInt32 -/ +inductive StructInVariant + | empty + | withPoint (p : Point) + | withScalar (s : ScalarStruct) (extra : UInt32) +deriving Repr, BEq, DecidableEq, Inhabited + /-! 
## Shrinkable instances -/ instance : Shrinkable Nat where diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 79482e7..05dc0c9 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -22,14 +22,25 @@ use crate::object::{ // ============================================================================= crate::lean_domain_type! { - /// Lean `Point` — structure Point where x : Nat; y : Nat - LeanPoint; - /// Lean `NatTree` — inductive NatTree | leaf : Nat → NatTree | node : NatTree → NatTree → NatTree - LeanNatTree; /// Lean `RustData` — opaque external object LeanRustData; } +crate::lean_inductive! { + /// Lean `Point` — structure Point where x : Nat; y : Nat + LeanPoint [ { num_obj: 2 } ] +} + +crate::lean_inductive! { + /// Lean `NatTree` — | leaf (n : Nat) | node (l r : NatTree) + /// Recursive inductive: the `node` variant's object fields point to more + /// `NatTree` values, so this exercises nested-inductive access. + LeanNatTree [ + { num_obj: 1 }, // tag 0 — leaf (1 Nat) + { num_obj: 2 }, // tag 1 — node (2 NatTree children) + ] +} + crate::lean_inductive! { /// Lean `ScalarStruct` — structure ScalarStruct where obj : Nat; u8val : UInt8; u32val : UInt32; u64val : UInt64 LeanScalarStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] @@ -73,6 +84,16 @@ crate::lean_inductive! { ] } +crate::lean_inductive! { + /// Lean `StructInVariant` — variants carry structure-typed fields. + /// | empty | withPoint (p : Point) | withScalar (s : ScalarStruct) (extra : UInt32) + LeanStructInVariant [ + { }, // tag 0 — empty + { num_obj: 1 }, // tag 1 — withPoint (1 obj: Point) + { num_obj: 1, num_32: 1 }, // tag 2 — withScalar (1 obj: ScalarStruct, 1 u32) + ] +} + /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`). 
fn build_nat(n: &Nat) -> LeanOwned { n.to_lean().into() @@ -163,13 +184,12 @@ pub(crate) extern "C" fn rs_roundtrip_option_nat( pub(crate) extern "C" fn rs_roundtrip_point( point_ptr: LeanPoint>, ) -> LeanPoint { - let ctor = point_ptr.as_ctor(); - let x = Nat::from_obj(&ctor.get(0)); - let y = Nat::from_obj(&ctor.get(1)); - let out = LeanCtor::alloc(0, 2, 0); - out.set(0, build_nat(&x)); - out.set(1, build_nat(&y)); - LeanPoint::new(out.into()) + let x = Nat::from_obj(&point_ptr.get_obj(0)); + let y = Nat::from_obj(&point_ptr.get_obj(1)); + let out = LeanPoint::alloc(0); + out.set_obj(0, build_nat(&x)); + out.set_obj(1, build_nat(&y)); + out } /// Round-trip a NatTree (inductive: leaf Nat | node NatTree NatTree). @@ -177,28 +197,28 @@ pub(crate) extern "C" fn rs_roundtrip_point( pub(crate) extern "C" fn rs_roundtrip_nat_tree( tree_ptr: LeanNatTree>, ) -> LeanNatTree { - LeanNatTree::new(roundtrip_nat_tree_recursive(&tree_ptr.as_ctor())) + roundtrip_nat_tree(&tree_ptr) } -fn roundtrip_nat_tree_recursive(ctor: &LeanCtor) -> LeanOwned { - match ctor.tag() { +fn roundtrip_nat_tree(tree: &LeanNatTree) -> LeanNatTree { + match tree.as_ctor().tag() { 0 => { // leaf : Nat → NatTree - let nat = Nat::from_obj(&ctor.get(0)); - let leaf = LeanCtor::alloc(0, 1, 0); - leaf.set(0, build_nat(&nat)); - leaf.into() + let nat = Nat::from_obj(&tree.get_obj(0)); + let out = LeanNatTree::alloc(0); + out.set_obj(0, build_nat(&nat)); + out } 1 => { - // node : NatTree → NatTree → NatTree - let left = roundtrip_nat_tree_recursive(&ctor.get(0).as_ctor()); - let right = roundtrip_nat_tree_recursive(&ctor.get(1).as_ctor()); - let node = LeanCtor::alloc(1, 2, 0); - node.set(0, left); - node.set(1, right); - node.into() + // node : NatTree → NatTree → NatTree (recurse into children) + let left = roundtrip_nat_tree(&LeanNatTree::from_ctor(tree.get_obj(0).as_ctor())); + let right = roundtrip_nat_tree(&LeanNatTree::from_ctor(tree.get_obj(1).as_ctor())); + let out = LeanNatTree::alloc(1); + 
out.set_obj(0, left); + out.set_obj(1, right); + out } - _ => panic!("Invalid NatTree tag: {}", ctor.tag()), + tag => panic!("Invalid NatTree tag: {tag}"), } } @@ -617,6 +637,60 @@ pub(crate) extern "C" fn rs_roundtrip_inductive_holder( out } +/// Round-trip a StructInVariant — an inductive whose variants hold structures. +/// +/// Exercises variant-dispatched access where the variant's payload is itself +/// a ctor-backed Lean structure (`Point` / `ScalarStruct`), re-using those +/// structures' own generated accessors for the inner reads and writes. +#[unsafe(no_mangle)] +pub(crate) extern "C" fn rs_roundtrip_struct_in_variant( + ptr: LeanStructInVariant>, +) -> LeanStructInVariant { + // Tag 0 (empty) — scalar-unboxed by Lean. + if ptr.inner().is_scalar() { + return LeanStructInVariant::new(ptr.inner().to_owned_ref()); + } + + let tag = ptr.as_ctor().tag(); + match tag { + 1 => { + // withPoint: clone the Point out and rebuild it via LeanPoint's accessors. + let src_pt = LeanPoint::from_ctor(ptr.get_obj(0).as_ctor()); + let x = Nat::from_obj(&src_pt.get_obj(0)); + let y = Nat::from_obj(&src_pt.get_obj(1)); + + let new_pt = LeanPoint::alloc(0); + new_pt.set_obj(0, build_nat(&x)); + new_pt.set_obj(1, build_nat(&y)); + + let dst = LeanStructInVariant::alloc(1); + dst.set_obj(0, new_pt); + dst + } + 2 => { + // withScalar: clone the ScalarStruct out + copy the trailing u32. 
+ let src_ss = LeanScalarStruct::from_ctor(ptr.get_obj(0).as_ctor()); + let obj_nat = Nat::from_obj(&src_ss.get_obj(0)); + let u64val = src_ss.get_num_64(0); + let u32val = src_ss.get_num_32(0); + let u8val = src_ss.get_num_8(0); + let extra = ptr.get_num_32(0); + + let new_ss = LeanScalarStruct::alloc(0); + new_ss.set_obj(0, build_nat(&obj_nat)); + new_ss.set_num_64(0, u64val); + new_ss.set_num_32(0, u32val); + new_ss.set_num_8(0, u8val); + + let dst = LeanStructInVariant::alloc(2); + dst.set_obj(0, new_ss); + dst.set_num_32(0, extra); + dst + } + _ => unreachable!("Invalid StructInVariant tag: {tag}"), + } +} + // ============================================================================= // Float / Float32 / USize scalar roundtrips // ============================================================================= From 85187ff86de7fe12b274c564d7b2f433fcda4ddf Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Thu, 16 Apr 2026 12:10:51 -0400 Subject: [PATCH 17/17] Multi-arg lean_inductive! --- src/lib.rs | 24 ++++++++++++++++++---- src/test_ffi.rs | 53 ++++++++++++++++++------------------------------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ec55fd2..58a525a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -203,14 +203,29 @@ macro_rules! lean_domain_type { /// /// `alloc(tag)` and every accessor panic if the index is out of range for /// the current variant. +/// +/// Multiple types can share one invocation, separated by `;`: +/// +/// ```ignore +/// lean_inductive! { +/// LeanPoint [ { num_obj: 2 } ]; +/// LeanNatTree [ +/// { num_obj: 1 }, // leaf +/// { num_obj: 2 }, // node +/// ]; +/// } +/// ``` #[macro_export] macro_rules! lean_inductive { ( - $(#[$top_meta:meta])* - $top:ident [ - $( { $($key:ident : $val:expr),* $(,)? } ),+ $(,)? - ] + $( + $(#[$top_meta:meta])* + $top:ident [ + $( { $($key:ident : $val:expr),* $(,)? } ),+ $(,)? + ] + );+ $(;)? 
) => { + $( $crate::lean_domain_type! { $(#[$top_meta])* $top; } impl $crate::object::LeanCtorLayout for $top { @@ -327,5 +342,6 @@ macro_rules! lean_inductive { ) } } + )+ }; } diff --git a/src/test_ffi.rs b/src/test_ffi.rs index 05dc0c9..8165d8f 100644 --- a/src/test_ffi.rs +++ b/src/test_ffi.rs @@ -28,70 +28,55 @@ crate::lean_domain_type! { crate::lean_inductive! { /// Lean `Point` — structure Point where x : Nat; y : Nat - LeanPoint [ { num_obj: 2 } ] -} + LeanPoint [ { num_obj: 2 } ]; -crate::lean_inductive! { /// Lean `NatTree` — | leaf (n : Nat) | node (l r : NatTree) /// Recursive inductive: the `node` variant's object fields point to more /// `NatTree` values, so this exercises nested-inductive access. LeanNatTree [ { num_obj: 1 }, // tag 0 — leaf (1 Nat) { num_obj: 2 }, // tag 1 — node (2 NatTree children) - ] -} + ]; -crate::lean_inductive! { /// Lean `ScalarStruct` — structure ScalarStruct where obj : Nat; u8val : UInt8; u32val : UInt32; u64val : UInt64 - LeanScalarStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ] -} -crate::lean_inductive! { + LeanScalarStruct [ { num_obj: 1, num_64: 1, num_32: 1, num_8: 1 } ]; + /// Lean `ExtScalarStruct` — all scalar types - LeanExtScalarStruct [ { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } ] -} -crate::lean_inductive! { + LeanExtScalarStruct [ { num_obj: 1, num_64: 2, num_32: 2, num_16: 1, num_8: 1 } ]; + /// Lean `USizeStruct` — structure USizeStruct where obj : Nat; uval : USize; u8val : UInt8 - LeanUSizeStruct [ { num_obj: 1, num_usize: 1, num_8: 1 } ] -} -crate::lean_inductive! { + LeanUSizeStruct [ { num_obj: 1, num_usize: 1, num_8: 1 } ]; + /// Lean `USizeMixedStruct` — structure with USize + u64 + u32 + Bool - LeanUSizeMixedStruct [ { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } ] -} -crate::lean_inductive! 
{ + LeanUSizeMixedStruct [ { num_obj: 1, num_usize: 1, num_64: 1, num_32: 1, num_8: 1 } ]; + /// Lean `BoolStruct` — structure BoolStruct where obj : Nat; b1 : Bool; b2 : Bool; b3 : Bool - LeanBoolStruct [ { num_obj: 1, num_8: 3 } ] -} -crate::lean_inductive! { + LeanBoolStruct [ { num_obj: 1, num_8: 3 } ]; + /// Lean `MultiU32Struct` — structure MultiU32Struct where obj : Nat; a : UInt32; b : UInt32; c : UInt32 - LeanMultiU32Struct [ { num_obj: 1, num_32: 3 } ] -} -crate::lean_inductive! { + LeanMultiU32Struct [ { num_obj: 1, num_32: 3 } ]; + /// Lean `Outer` — structure containing ScalarStruct + String + UInt64 - LeanOuter [ { num_obj: 2, num_64: 1 } ] -} -crate::lean_inductive! { + LeanOuter [ { num_obj: 2, num_64: 1 } ]; + /// Lean `InductiveHolder` — structure containing TestInductive + UInt32 - LeanInductiveHolder [ { num_obj: 1, num_32: 1 } ] -} + LeanInductiveHolder [ { num_obj: 1, num_32: 1 } ]; -crate::lean_inductive! { /// Lean `TestInductive` — | empty | withScalars (a b : UInt64) | withMixed (obj : Nat) (x : UInt32) (flag : Bool) /// Tag 0 is scalar-unboxed by Lean; tags 1–2 are ctor objects. LeanTestInductive [ { }, // tag 0 — empty { num_64: 2 }, // tag 1 — withScalars (2 u64) { num_obj: 1, num_32: 1, num_8: 1 }, // tag 2 — withMixed (1 obj + 1 u32 + 1 bool) - ] -} + ]; -crate::lean_inductive! { /// Lean `StructInVariant` — variants carry structure-typed fields. /// | empty | withPoint (p : Point) | withScalar (s : ScalarStruct) (extra : UInt32) LeanStructInVariant [ { }, // tag 0 — empty { num_obj: 1 }, // tag 1 — withPoint (1 obj: Point) { num_obj: 1, num_32: 1 }, // tag 2 — withScalar (1 obj: ScalarStruct, 1 u32) - ] + ]; } /// Build a Lean Nat from a Rust Nat (delegates to `Nat::to_lean`).