From 8114b920f39b2a56d03e85876ee1f185b9ec8e1b Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 13:33:37 -0500 Subject: [PATCH 01/20] added 2 methods for stack zeroization for #810 --- zeroize/Cargo.toml | 2 + zeroize/src/lib.rs | 9 ++ zeroize/src/stack_sanitization.rs | 170 ++++++++++++++++++++++++++++ zeroize/tests/stack_sanitization.rs | 31 +++++ 4 files changed, 212 insertions(+) create mode 100644 zeroize/src/stack_sanitization.rs create mode 100644 zeroize/tests/stack_sanitization.rs diff --git a/zeroize/Cargo.toml b/zeroize/Cargo.toml index 08e27273..b23f44ae 100644 --- a/zeroize/Cargo.toml +++ b/zeroize/Cargo.toml @@ -19,12 +19,14 @@ edition = "2024" rust-version = "1.85" [dependencies] +psm = { version = "0.1.26", optional = true } serde = { version = "1.0", default-features = false, optional = true } zeroize_derive = { version = "1.4", path = "../zeroize_derive", optional = true } [features] default = ["alloc"] alloc = [] +stack_sanitization = ["psm"] std = ["alloc"] aarch64 = [] # NOTE: vestigial no-op feature; AArch64 support is always enabled now diff --git a/zeroize/src/lib.rs b/zeroize/src/lib.rs index 8d7b915f..f5caf3d3 100644 --- a/zeroize/src/lib.rs +++ b/zeroize/src/lib.rs @@ -250,6 +250,15 @@ mod aarch64; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] mod x86; +#[cfg(feature = "stack_sanitization")] +mod stack_sanitization; + +#[cfg(feature = "stack_sanitization")] +pub use stack_sanitization::{secure_crypto_call, secure_crypto_call_heap}; + +#[cfg(all(feature = "stack_sanitization", feature = "alloc"))] +pub use stack_sanitization::create_aligned_vec; + use core::{ marker::{PhantomData, PhantomPinned}, mem::{MaybeUninit, size_of}, diff --git a/zeroize/src/stack_sanitization.rs b/zeroize/src/stack_sanitization.rs new file mode 100644 index 00000000..37c8620b --- /dev/null +++ b/zeroize/src/stack_sanitization.rs @@ -0,0 +1,170 @@ +//! Module for sanitizing the stack, sometimes referred to as "Stack Bleaching." 
+ +use core::arch::asm; +use core::ptr; + +use crate::Zeroize; + +#[cfg(feature = "alloc")] +use alloc::{ + vec, + vec::{Vec} +}; + +/// Gets the current stack pointer +#[inline(never)] +fn get_stack_pointer() -> *mut u8 { + let sp: *mut u8; + #[cfg(target_arch = "x86_64")] + unsafe { + asm!("mov {}, rsp", out(reg) sp, options(nomem, nostack, preserves_flags)); + } + #[cfg(target_arch = "aarch64")] + unsafe { + asm!("mov {}, sp", out(reg) sp, options(nomem, nostack, preserves_flags)); + } + #[cfg(target_arch = "x86")] + unsafe { + asm!("mov {}, esp", out(reg) sp, options(nomem, nostack, preserves_flags)); + } + sp +} + +/// Clears stack memory between two stack pointers using volatile writes +#[inline(never)] +unsafe fn clear_stack_range(start_sp: *mut u8, end_sp: *mut u8) { + let start = start_sp.min(end_sp) as usize; + let end = start_sp.max(end_sp) as usize; + let size = end - start; + + if size == 0 || size > 1024 * 1024 { // Sanity check + return; + } + + // Clear using volatile writes to prevent optimization + let mut ptr = start as *mut u64; + // Align to 8-byte boundary to clear 8 bytes at a time + let end_ptr = (end & !7) as *mut u64; + + while ptr < end_ptr { + unsafe { + ptr::write_volatile(ptr, 0); + ptr = ptr.add(1); + } + } + + // Clear remaining bytes + let mut byte_ptr = ptr as *mut u8; + let byte_end = end as *mut u8; + while byte_ptr < byte_end { + unsafe { + ptr::write_volatile(byte_ptr, 0); + byte_ptr = byte_ptr.add(1); + } + } +} + +/// Wrapper function that captures stack state and clears after crypto operation +/// +/// # Safety +/// +/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register +/// reuse or stack layout changes +#[inline(never)] +pub unsafe fn secure_crypto_call(crypto_fn: F) -> R +where + F: FnOnce() -> R, +{ + // Get initial stack pointer + let initial_sp = get_stack_pointer(); + assert!(!initial_sp.is_null()); + + // Call the crypto function (this will use more stack) + let result = crypto_fn(); + + // 
Get stack pointer after crypto operation + let final_sp = psm::stack_pointer(); + assert!(!final_sp.is_null()); + debug_assert_ne!(initial_sp, final_sp); + + // Clear the stack range used by the crypto function + unsafe { + clear_stack_range(initial_sp, final_sp); + } + + result +} + +/// Wrapper function that captures stack state and clears after crypto operation +/// by using an allocation on the heap as the stack. +/// +/// If you wish to clear the registers, it is recommended to clear them from +/// within `crypto_fn()`. This function does not clear them for you. +/// +/// # Safety +/// +/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register +/// reuse and stack layout changes. +/// * The stack start address needs to be aligned for the target architecture, which is +/// typically 16 bytes for x86_64. +/// * The stack size needs to be a multiple of stack alignment required by +/// the target. +/// * The stack size must not overflow `isize`. +/// * The stack needs to be large enough for `crypto_fn()` to execute without +/// overflow. +/// * `crypto_fn()` must not unwind or return control flow by any other means +/// than by directly returning. +pub unsafe fn secure_crypto_call_heap(crypto_fn: F, stack: &mut [u8] ) -> R +where + F: FnOnce() -> R, +{ + let res = unsafe { + psm::on_stack(stack.as_mut_ptr(), stack.len(), || { + let res = crypto_fn(); + res + }) + }; + stack.zeroize(); + res +} + +/// Round up to the nearest multiple of alignment +const fn align_up(value: usize, alignment: usize) -> usize { + (value + alignment - 1) & !(alignment - 1) +} + +/// Creates an aligned Vec with the specified size in KB and alignment. +/// +/// This helps ensure that the safety requirements are met when using +/// `fn secure_crypto_call_heap()`. +/// +/// Both the data pointer and length will be aligned to the specified boundary. 
+#[cfg(feature = "alloc")] +pub fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { + let size_bytes = size_kb * 1024; + // checking one of the safety conditions of `psm::on_stack()` + assert!(size_bytes <= isize::MAX as usize); + + let aligned_size = align_up(size_bytes, alignment); + + // Allocate extra space to ensure we can find an aligned region + let mut vec = vec![0u8; aligned_size + alignment]; + + // Find the aligned position within the vec + let ptr_addr = vec.as_ptr() as usize; + let aligned_addr = align_up(ptr_addr, alignment); + let offset = aligned_addr - ptr_addr; + + // Remove elements from the beginning to align the start + vec.drain(0..offset); + + // Truncate to the exact aligned size we want + vec.truncate(aligned_size); + + // Verify alignment (these will be optimized out in release builds) + debug_assert_eq!(vec.as_ptr() as usize % alignment, 0); + debug_assert_eq!(vec.len() % alignment, 0); + debug_assert_eq!(vec.len(), aligned_size); + + vec +} \ No newline at end of file diff --git a/zeroize/tests/stack_sanitization.rs b/zeroize/tests/stack_sanitization.rs new file mode 100644 index 00000000..3e298a9c --- /dev/null +++ b/zeroize/tests/stack_sanitization.rs @@ -0,0 +1,31 @@ +//! 
Stack sanitization integration tests + +#[cfg(all(feature = "stack_sanitization", feature = "alloc"))] +mod stack_sanitization_tests { + use zeroize::{create_aligned_vec, secure_crypto_call, secure_crypto_call_heap}; + + fn dummy_fn() -> (*const u8, u64) { + let temporary_data = 42; + let ptr = temporary_data as *const u8; + (ptr, 12345) + } + + #[test] + #[ignore = "segmentation fault"] + fn return_safety_and_zeroization_of_temp_data() { + // results in segmentation fault + let result = unsafe { secure_crypto_call(|| dummy_fn()) }; + assert_eq!(result.1, 12345); + // results in segmentation fault + // assert_eq!(unsafe {*result.0}, 42); + } + + #[test] + fn stack_sanitization_v2() { + let mut stack = create_aligned_vec(4, 16); + let result = unsafe { secure_crypto_call_heap(|| {dummy_fn()}, &mut stack)}; + assert_eq!(result.1, 12345); + // results in segmentation fault + // assert_eq!(unsafe {*result.0}, 42); + } +} \ No newline at end of file From 97197386ede8a5bf78d5aa3ca3f7a188be94ce01 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 13:46:13 -0500 Subject: [PATCH 02/20] removed code that doesn't work without segfaults --- zeroize/src/lib.rs | 2 +- zeroize/src/stack_sanitization.rs | 84 ----------------------------- zeroize/tests/stack_sanitization.rs | 12 +---- 3 files changed, 2 insertions(+), 96 deletions(-) diff --git a/zeroize/src/lib.rs b/zeroize/src/lib.rs index f5caf3d3..e6984bc7 100644 --- a/zeroize/src/lib.rs +++ b/zeroize/src/lib.rs @@ -254,7 +254,7 @@ mod x86; mod stack_sanitization; #[cfg(feature = "stack_sanitization")] -pub use stack_sanitization::{secure_crypto_call, secure_crypto_call_heap}; +pub use stack_sanitization::secure_crypto_call_heap; #[cfg(all(feature = "stack_sanitization", feature = "alloc"))] pub use stack_sanitization::create_aligned_vec; diff --git a/zeroize/src/stack_sanitization.rs b/zeroize/src/stack_sanitization.rs index 37c8620b..46083b7e 100644 --- a/zeroize/src/stack_sanitization.rs +++ 
b/zeroize/src/stack_sanitization.rs @@ -11,90 +11,6 @@ use alloc::{ vec::{Vec} }; -/// Gets the current stack pointer -#[inline(never)] -fn get_stack_pointer() -> *mut u8 { - let sp: *mut u8; - #[cfg(target_arch = "x86_64")] - unsafe { - asm!("mov {}, rsp", out(reg) sp, options(nomem, nostack, preserves_flags)); - } - #[cfg(target_arch = "aarch64")] - unsafe { - asm!("mov {}, sp", out(reg) sp, options(nomem, nostack, preserves_flags)); - } - #[cfg(target_arch = "x86")] - unsafe { - asm!("mov {}, esp", out(reg) sp, options(nomem, nostack, preserves_flags)); - } - sp -} - -/// Clears stack memory between two stack pointers using volatile writes -#[inline(never)] -unsafe fn clear_stack_range(start_sp: *mut u8, end_sp: *mut u8) { - let start = start_sp.min(end_sp) as usize; - let end = start_sp.max(end_sp) as usize; - let size = end - start; - - if size == 0 || size > 1024 * 1024 { // Sanity check - return; - } - - // Clear using volatile writes to prevent optimization - let mut ptr = start as *mut u64; - // Align to 8-byte boundary to clear 8 bytes at a time - let end_ptr = (end & !7) as *mut u64; - - while ptr < end_ptr { - unsafe { - ptr::write_volatile(ptr, 0); - ptr = ptr.add(1); - } - } - - // Clear remaining bytes - let mut byte_ptr = ptr as *mut u8; - let byte_end = end as *mut u8; - while byte_ptr < byte_end { - unsafe { - ptr::write_volatile(byte_ptr, 0); - byte_ptr = byte_ptr.add(1); - } - } -} - -/// Wrapper function that captures stack state and clears after crypto operation -/// -/// # Safety -/// -/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register -/// reuse or stack layout changes -#[inline(never)] -pub unsafe fn secure_crypto_call(crypto_fn: F) -> R -where - F: FnOnce() -> R, -{ - // Get initial stack pointer - let initial_sp = get_stack_pointer(); - assert!(!initial_sp.is_null()); - - // Call the crypto function (this will use more stack) - let result = crypto_fn(); - - // Get stack pointer after crypto operation - let 
final_sp = psm::stack_pointer(); - assert!(!final_sp.is_null()); - debug_assert_ne!(initial_sp, final_sp); - - // Clear the stack range used by the crypto function - unsafe { - clear_stack_range(initial_sp, final_sp); - } - - result -} - /// Wrapper function that captures stack state and clears after crypto operation /// by using an allocation on the heap as the stack. /// diff --git a/zeroize/tests/stack_sanitization.rs b/zeroize/tests/stack_sanitization.rs index 3e298a9c..70f899d2 100644 --- a/zeroize/tests/stack_sanitization.rs +++ b/zeroize/tests/stack_sanitization.rs @@ -2,7 +2,7 @@ #[cfg(all(feature = "stack_sanitization", feature = "alloc"))] mod stack_sanitization_tests { - use zeroize::{create_aligned_vec, secure_crypto_call, secure_crypto_call_heap}; + use zeroize::{create_aligned_vec, secure_crypto_call_heap}; fn dummy_fn() -> (*const u8, u64) { let temporary_data = 42; @@ -10,16 +10,6 @@ mod stack_sanitization_tests { (ptr, 12345) } - #[test] - #[ignore = "segmentation fault"] - fn return_safety_and_zeroization_of_temp_data() { - // results in segmentation fault - let result = unsafe { secure_crypto_call(|| dummy_fn()) }; - assert_eq!(result.1, 12345); - // results in segmentation fault - // assert_eq!(unsafe {*result.0}, 42); - } - #[test] fn stack_sanitization_v2() { let mut stack = create_aligned_vec(4, 16); From 560ce88fedbf89caeaac887bb79d3f64e09cd98e Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 17:22:12 -0500 Subject: [PATCH 03/20] move stack_sanitization.rs to its own crate --- stack_sanitizer/Cargo.toml | 25 ++++ stack_sanitizer/README.md | 65 ++++++++++ stack_sanitizer/src/lib.rs | 128 ++++++++++++++++++++ stack_sanitizer/tests/stack_sanitization.rs | 19 +++ 4 files changed, 237 insertions(+) create mode 100644 stack_sanitizer/Cargo.toml create mode 100644 stack_sanitizer/README.md create mode 100644 stack_sanitizer/src/lib.rs create mode 100644 stack_sanitizer/tests/stack_sanitization.rs diff --git 
a/stack_sanitizer/Cargo.toml b/stack_sanitizer/Cargo.toml new file mode 100644 index 00000000..7d5ffc54 --- /dev/null +++ b/stack_sanitizer/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "stack_sanitizer" +version = "0.1.0" +description = """ +Securely sanitize the stack with a simple function built on +the Portable Stack Manipulation (psm) crate. +""" +authors = ["The RustCrypto Project Developers"] +license = "Apache-2.0 OR MIT" +homepage = "https://github.com/RustCrypto/utils/tree/master/stack_sanitizer" +repository = "https://github.com/RustCrypto/utils" +readme = "README.md" +categories = ["cryptography", "memory-management", "no-std", "os"] +keywords = ["memory", "memset", "secure", "volatile", "zero", "stack"] +edition = "2024" +rust-version = "1.85" + +[dependencies] +psm = { version = "0.1.26", optional = true } +zeroize = { version = "1.0" } + +[features] + +[package.metadata.docs.rs] +all-features = true diff --git a/stack_sanitizer/README.md b/stack_sanitizer/README.md new file mode 100644 index 00000000..1cd873cc --- /dev/null +++ b/stack_sanitizer/README.md @@ -0,0 +1,65 @@ +# [RustCrypto]: stack_sanitizer + +[![Crate][crate-image]][crate-link] +[![Docs][docs-image]][docs-link] +![Apache 2.0/MIT Licensed][license-image] +![MSRV][rustc-image] +[![Build Status][build-image]][build-link] + +Securely zero the stack (a.k.a. [zeroize]) while avoiding compiler optimizations. + +This crate implements a portable approach to securely zeroing the stack using +techniques which guarantee they won't be "optimized away" by the compiler. + +[Documentation] + +## About + +[Zeroing memory securely is hard] - compilers optimize for performance, and +in doing so they love to "optimize away" unnecessary zeroing calls, as well +as make extra copies of data on the stack that cannot be easily zeroed. That's +what this crate is for. + +This crate isn't about tricks: it uses [psm::on_stack] to run a function on +a portable stack, and then uses [zeroize] to zero the stack. 
`psm` implements +all of the assembly for several different architectures, whereas the [zeroize] +segment was implemented in pure Rust. + +- `#![no_std]` i.e. **embedded-friendly**! (`alloc` is required) +- No functionality besides securely zeroing the a function's stack usage! + +## License + +Licensed under either of: + +* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) +* [MIT license](http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. + +[//]: # (badges) + +[crate-image]: https://img.shields.io/crates/v/zeroize.svg +[crate-link]: https://crates.io/crates/zeroize +[docs-image]: https://docs.rs/zeroize/badge.svg +[docs-link]: https://docs.rs/zeroize/ +[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg +[rustc-image]: https://img.shields.io/badge/rustc-1.85+-blue.svg +[build-image]: https://github.com/RustCrypto/utils/actions/workflows/zeroize.yml/badge.svg?branch=master +[build-link]: https://github.com/RustCrypto/utils/actions/workflows/zeroize.yml?query=branch:master + +[//]: # (general links) + +[RustCrypto]: https://github.com/RustCrypto +[zeroize]: https://en.wikipedia.org/wiki/Zeroisation +[`Zeroize` trait]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html +[Documentation]: https://docs.rs/zeroize/ +[Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html +[psm::on_stack]: https://docs.rs/psm/latest/psm/fn.on_stack.html +[good cryptographic hygiene]: https://github.com/veorq/cryptocoding#clean-memory-of-secret-data diff --git a/stack_sanitizer/src/lib.rs b/stack_sanitizer/src/lib.rs new file mode 100644 index 00000000..71309aab --- /dev/null +++ b/stack_sanitizer/src/lib.rs @@ -0,0 +1,128 @@ +//! 
# stack_bleach +//! +//! A crate for sanitizing stack memory after sensitive operations—sometimes referred to as _Stack Bleaching_. +//! +//! Modern compilers and CPUs routinely copy, spill, and rearrange data during execution. Even if sensitive values are scoped to a function, they may: +//! - Be duplicated across multiple stack frames +//! - Be spilled from registers to the stack during register pressure +//! - Persist in memory long after the function returns +//! +//! This crate provides tools to explicitly zeroize stack regions used during +//! cryptographic or sensitive computations, helping mitigate: +//! - Leakage through stack inspection or memory dumps +//! - Residual data from compiler-inserted spills +//! - ABI-visible register reuse across function boundaries +//! +//! ## Why Stack Sanitization Matters +//! +//! Unlike heap memory, stack allocations are ephemeral and compiler-controlled. +//! Sensitive data may be: +//! - Copied implicitly by the optimizer +//! - Stored temporarily during register allocation +//! - Left behind in stack frames even after function return +//! +//! This crate offers abstractions for: +//! - Executing functions on isolated, aligned stack buffers +//! - Zeroizing stack memory after execution +//! +//! ## Safety +//! +//! These operations involve low-level stack manipulation and unsafe code. The +//! caller must ensure: +//! - The stack size provided is large enough for the closure to run with. +//! - The closure does not unwind or return control flow by any means other than +//! directly returning. +//! +//! ## Use Cases +//! +//! - Cryptographic routines +//! - Secure enclave transitions +//! 
- Sanitizing temporary buffers in high-assurance systems + +use psm::on_stack; + +use zeroize::Zeroize; + +extern crate alloc; + +use alloc::{ + vec, + vec::{Vec} +}; + +/// Executes a function/closure and clears the function's stack frames by using +/// preallocated space on the heap as the function's stack, and then zeroing +/// that allocated data once the code has ran. +/// +/// This function does not clear the CPU registers. +/// +/// # Arguments +/// +/// * `stack_size_kb` - how large the stack will be. `psm` recommends at least +/// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, +/// some architectures might consume more memory in the stack, such as SPARC. +/// * `crypto_fn` - the code to run while on separate stack. +/// +/// # Safety +/// +/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register +/// reuse and stack layout changes. +/// * The stack needs to be large enough for `crypto_fn()` to execute without +/// overflow. +/// * `crypto_fn()` must not unwind or return control flow by any other means +/// than by directly returning. +pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> R +where + F: FnOnce() -> R, +{ + assert!(stack_size_kb * 1024 > 0, "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`"); + let mut stack = create_aligned_vec(stack_size_kb as usize, core::mem::align_of::()); + let res = unsafe { + on_stack(stack.as_mut_ptr(), stack.len(), || { + let res = crypto_fn(); + res + }) + }; + stack.zeroize(); + res +} + +/// Round up to the nearest multiple of alignment +const fn align_up(value: usize, alignment: usize) -> usize { + (value + alignment - 1) & !(alignment - 1) +} + +/// Creates an aligned Vec with the specified size in KB and alignment. +/// +/// This helps ensure that the safety requirements are met when using +/// `fn secure_crypto_call_heap()`. +/// +/// Both the data pointer and length will be aligned to the specified boundary. 
+fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { + let size_bytes = size_kb * 1024; + // checking one of the safety conditions of `psm::on_stack()` + assert!(size_bytes <= isize::MAX as usize); + + let aligned_size = align_up(size_bytes, alignment); + + // Allocate extra space to ensure we can find an aligned region + let mut vec = vec![0u8; aligned_size + alignment]; + + // Find the aligned position within the vec + let ptr_addr = vec.as_ptr() as usize; + let aligned_addr = align_up(ptr_addr, alignment); + let offset = aligned_addr - ptr_addr; + + // Remove elements from the beginning to align the start + vec.drain(0..offset); + + // Truncate to the exact aligned size we want + vec.truncate(aligned_size); + + // Verify alignment (these will be optimized out in release builds) + debug_assert_eq!(vec.as_ptr() as usize % alignment, 0); + debug_assert_eq!(vec.len() % alignment, 0); + debug_assert_eq!(vec.len(), aligned_size); + + vec +} \ No newline at end of file diff --git a/stack_sanitizer/tests/stack_sanitization.rs b/stack_sanitizer/tests/stack_sanitization.rs new file mode 100644 index 00000000..d9f0bbfd --- /dev/null +++ b/stack_sanitizer/tests/stack_sanitization.rs @@ -0,0 +1,19 @@ +//! 
Stack sanitization integration tests + +mod stack_sanitization_tests { + use stack_sanitizer::exec_on_sanitized_stack; + + fn dummy_fn() -> (*const u8, u64) { + let temporary_data = 42; + let ptr = temporary_data as *const u8; + (ptr, 12345) + } + + #[test] + fn stack_sanitization_v2() { + let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn())}; + assert_eq!(result.1, 12345); + // results in segmentation fault + // assert_eq!(unsafe {*result.0}, 42); + } +} \ No newline at end of file From 3e3a737353e135f9f0e5d8285d9f988beada554e Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 17:22:34 -0500 Subject: [PATCH 04/20] fmt --- stack_sanitizer/src/lib.rs | 66 ++++++++++----------- stack_sanitizer/tests/stack_sanitization.rs | 4 +- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/stack_sanitizer/src/lib.rs b/stack_sanitizer/src/lib.rs index 71309aab..6be26be1 100644 --- a/stack_sanitizer/src/lib.rs +++ b/stack_sanitizer/src/lib.rs @@ -7,7 +7,7 @@ //! - Be spilled from registers to the stack during register pressure //! - Persist in memory long after the function returns //! -//! This crate provides tools to explicitly zeroize stack regions used during +//! This crate provides tools to explicitly zeroize stack regions used during //! cryptographic or sensitive computations, helping mitigate: //! - Leakage through stack inspection or memory dumps //! - Residual data from compiler-inserted spills @@ -15,7 +15,7 @@ //! //! ## Why Stack Sanitization Matters //! -//! Unlike heap memory, stack allocations are ephemeral and compiler-controlled. +//! Unlike heap memory, stack allocations are ephemeral and compiler-controlled. //! Sensitive data may be: //! - Copied implicitly by the optimizer //! - Stored temporarily during register allocation @@ -27,10 +27,10 @@ //! //! ## Safety //! -//! These operations involve low-level stack manipulation and unsafe code. The +//! These operations involve low-level stack manipulation and unsafe code. 
The //! caller must ensure: //! - The stack size provided is large enough for the closure to run with. -//! - The closure does not unwind or return control flow by any means other than +//! - The closure does not unwind or return control flow by any means other than //! directly returning. //! //! ## Use Cases @@ -43,39 +43,39 @@ use psm::on_stack; use zeroize::Zeroize; -extern crate alloc; +extern crate alloc; -use alloc::{ - vec, - vec::{Vec} -}; +use alloc::{vec, vec::Vec}; -/// Executes a function/closure and clears the function's stack frames by using -/// preallocated space on the heap as the function's stack, and then zeroing +/// Executes a function/closure and clears the function's stack frames by using +/// preallocated space on the heap as the function's stack, and then zeroing /// that allocated data once the code has ran. -/// +/// /// This function does not clear the CPU registers. -/// +/// /// # Arguments -/// -/// * `stack_size_kb` - how large the stack will be. `psm` recommends at least -/// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, +/// +/// * `stack_size_kb` - how large the stack will be. `psm` recommends at least +/// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, /// some architectures might consume more memory in the stack, such as SPARC. /// * `crypto_fn` - the code to run while on separate stack. -/// +/// /// # Safety -/// -/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register +/// +/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register /// reuse and stack layout changes. -/// * The stack needs to be large enough for `crypto_fn()` to execute without +/// * The stack needs to be large enough for `crypto_fn()` to execute without /// overflow. -/// * `crypto_fn()` must not unwind or return control flow by any other means +/// * `crypto_fn()` must not unwind or return control flow by any other means /// than by directly returning. 
pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> R -where +where F: FnOnce() -> R, { - assert!(stack_size_kb * 1024 > 0, "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`"); + assert!( + stack_size_kb * 1024 > 0, + "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" + ); let mut stack = create_aligned_vec(stack_size_kb as usize, core::mem::align_of::()); let res = unsafe { on_stack(stack.as_mut_ptr(), stack.len(), || { @@ -93,10 +93,10 @@ const fn align_up(value: usize, alignment: usize) -> usize { } /// Creates an aligned Vec with the specified size in KB and alignment. -/// -/// This helps ensure that the safety requirements are met when using +/// +/// This helps ensure that the safety requirements are met when using /// `fn secure_crypto_call_heap()`. -/// +/// /// Both the data pointer and length will be aligned to the specified boundary. fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { let size_bytes = size_kb * 1024; @@ -104,25 +104,25 @@ fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { assert!(size_bytes <= isize::MAX as usize); let aligned_size = align_up(size_bytes, alignment); - + // Allocate extra space to ensure we can find an aligned region let mut vec = vec![0u8; aligned_size + alignment]; - + // Find the aligned position within the vec let ptr_addr = vec.as_ptr() as usize; let aligned_addr = align_up(ptr_addr, alignment); let offset = aligned_addr - ptr_addr; - + // Remove elements from the beginning to align the start vec.drain(0..offset); - + // Truncate to the exact aligned size we want vec.truncate(aligned_size); - + // Verify alignment (these will be optimized out in release builds) debug_assert_eq!(vec.as_ptr() as usize % alignment, 0); debug_assert_eq!(vec.len() % alignment, 0); debug_assert_eq!(vec.len(), aligned_size); - + vec -} \ No newline at end of file +} diff --git a/stack_sanitizer/tests/stack_sanitization.rs 
b/stack_sanitizer/tests/stack_sanitization.rs index d9f0bbfd..50f96a22 100644 --- a/stack_sanitizer/tests/stack_sanitization.rs +++ b/stack_sanitizer/tests/stack_sanitization.rs @@ -11,9 +11,9 @@ mod stack_sanitization_tests { #[test] fn stack_sanitization_v2() { - let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn())}; + let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn()) }; assert_eq!(result.1, 12345); // results in segmentation fault // assert_eq!(unsafe {*result.0}, 42); } -} \ No newline at end of file +} From 075d06b38b98fd040444db03b8dc6f6983eb8c4f Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 19:21:20 -0500 Subject: [PATCH 05/20] moved to zeroize_stack --- Cargo.lock | 39 +++++++++ Cargo.toml | 3 +- zeroize/Cargo.toml | 2 - zeroize/src/lib.rs | 9 -- zeroize/src/stack_sanitization.rs | 86 ------------------- zeroize/tests/stack_sanitization.rs | 21 ----- {stack_sanitizer => zeroize_stack}/Cargo.toml | 8 +- {stack_sanitizer => zeroize_stack}/README.md | 8 +- {stack_sanitizer => zeroize_stack}/src/lib.rs | 8 +- .../tests/zeroize_stack.rs | 6 +- 10 files changed, 57 insertions(+), 133 deletions(-) delete mode 100644 zeroize/src/stack_sanitization.rs delete mode 100644 zeroize/tests/stack_sanitization.rs rename {stack_sanitizer => zeroize_stack}/Cargo.toml (76%) rename {stack_sanitizer => zeroize_stack}/README.md (91%) rename {stack_sanitizer => zeroize_stack}/src/lib.rs (96%) rename stack_sanitizer/tests/stack_sanitization.rs => zeroize_stack/tests/zeroize_stack.rs (71%) diff --git a/Cargo.lock b/Cargo.lock index c95317cc..bf1b5503 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,6 +31,16 @@ dependencies = [ "hybrid-array 0.4.1", ] +[[package]] +name = "cc" +version = "1.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65193589c6404eb80b450d618eaf9a2cafaaafd57ecce47370519ef674a7bd44" +dependencies = [ + "find-msvc-tools", + "shlex", +] + [[package]] name = "cfg-if" version = "1.0.0" 
@@ -103,6 +113,12 @@ dependencies = [ "sha3", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" + [[package]] name = "hex" version = "0.4.3" @@ -185,6 +201,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "psm" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" +dependencies = [ + "cc", +] + [[package]] name = "quote" version = "1.0.40" @@ -253,6 +278,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "syn" version = "2.0.100" @@ -308,3 +339,11 @@ dependencies = [ "quote", "syn", ] + +[[package]] +name = "zeroize_stack" +version = "0.1.0" +dependencies = [ + "psm", + "zeroize 1.8.1", +] diff --git a/Cargo.toml b/Cargo.toml index d20ba5cc..332f4862 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,8 @@ members = [ "opaque-debug", "wycheproof2blb", "zeroize", - "zeroize_derive" + "zeroize_derive", + "zeroize_stack" ] exclude = ["aarch64-dit"] diff --git a/zeroize/Cargo.toml b/zeroize/Cargo.toml index b23f44ae..08e27273 100644 --- a/zeroize/Cargo.toml +++ b/zeroize/Cargo.toml @@ -19,14 +19,12 @@ edition = "2024" rust-version = "1.85" [dependencies] -psm = { version = "0.1.26", optional = true } serde = { version = "1.0", default-features = false, optional = true } zeroize_derive = { version = "1.4", path = "../zeroize_derive", optional = true } [features] default = ["alloc"] alloc = [] -stack_sanitization = ["psm"] std = ["alloc"] aarch64 = [] # NOTE: vestigial no-op feature; AArch64 support is always enabled now diff --git a/zeroize/src/lib.rs b/zeroize/src/lib.rs index e6984bc7..8d7b915f 
100644 --- a/zeroize/src/lib.rs +++ b/zeroize/src/lib.rs @@ -250,15 +250,6 @@ mod aarch64; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] mod x86; -#[cfg(feature = "stack_sanitization")] -mod stack_sanitization; - -#[cfg(feature = "stack_sanitization")] -pub use stack_sanitization::secure_crypto_call_heap; - -#[cfg(all(feature = "stack_sanitization", feature = "alloc"))] -pub use stack_sanitization::create_aligned_vec; - use core::{ marker::{PhantomData, PhantomPinned}, mem::{MaybeUninit, size_of}, diff --git a/zeroize/src/stack_sanitization.rs b/zeroize/src/stack_sanitization.rs deleted file mode 100644 index 46083b7e..00000000 --- a/zeroize/src/stack_sanitization.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! Module for sanitizing the stack, sometimes referred to as "Stack Bleaching." - -use core::arch::asm; -use core::ptr; - -use crate::Zeroize; - -#[cfg(feature = "alloc")] -use alloc::{ - vec, - vec::{Vec} -}; - -/// Wrapper function that captures stack state and clears after crypto operation -/// by using an allocation on the heap as the stack. -/// -/// If you wish to clear the registers, it is recommended to clear them from -/// within `crypto_fn()`. This function does not clear them for you. -/// -/// # Safety -/// -/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register -/// reuse and stack layout changes. -/// * The stack start address needs to be aligned for the target architecture, which is -/// typically 16 bytes for x86_64. -/// * The stack size needs to be a multiple of stack alignment required by -/// the target. -/// * The stack size must not overflow `isize`. -/// * The stack needs to be large enough for `crypto_fn()` to execute without -/// overflow. -/// * `crypto_fn()` must not unwind or return control flow by any other means -/// than by directly returning. 
-pub unsafe fn secure_crypto_call_heap(crypto_fn: F, stack: &mut [u8] ) -> R -where - F: FnOnce() -> R, -{ - let res = unsafe { - psm::on_stack(stack.as_mut_ptr(), stack.len(), || { - let res = crypto_fn(); - res - }) - }; - stack.zeroize(); - res -} - -/// Round up to the nearest multiple of alignment -const fn align_up(value: usize, alignment: usize) -> usize { - (value + alignment - 1) & !(alignment - 1) -} - -/// Creates an aligned Vec with the specified size in KB and alignment. -/// -/// This helps ensure that the safety requirements are met when using -/// `fn secure_crypto_call_heap()`. -/// -/// Both the data pointer and length will be aligned to the specified boundary. -#[cfg(feature = "alloc")] -pub fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { - let size_bytes = size_kb * 1024; - // checking one of the safety conditions of `psm::on_stack()` - assert!(size_bytes <= isize::MAX as usize); - - let aligned_size = align_up(size_bytes, alignment); - - // Allocate extra space to ensure we can find an aligned region - let mut vec = vec![0u8; aligned_size + alignment]; - - // Find the aligned position within the vec - let ptr_addr = vec.as_ptr() as usize; - let aligned_addr = align_up(ptr_addr, alignment); - let offset = aligned_addr - ptr_addr; - - // Remove elements from the beginning to align the start - vec.drain(0..offset); - - // Truncate to the exact aligned size we want - vec.truncate(aligned_size); - - // Verify alignment (these will be optimized out in release builds) - debug_assert_eq!(vec.as_ptr() as usize % alignment, 0); - debug_assert_eq!(vec.len() % alignment, 0); - debug_assert_eq!(vec.len(), aligned_size); - - vec -} \ No newline at end of file diff --git a/zeroize/tests/stack_sanitization.rs b/zeroize/tests/stack_sanitization.rs deleted file mode 100644 index 70f899d2..00000000 --- a/zeroize/tests/stack_sanitization.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! 
Stack sanitization integration tests - -#[cfg(all(feature = "stack_sanitization", feature = "alloc"))] -mod stack_sanitization_tests { - use zeroize::{create_aligned_vec, secure_crypto_call_heap}; - - fn dummy_fn() -> (*const u8, u64) { - let temporary_data = 42; - let ptr = temporary_data as *const u8; - (ptr, 12345) - } - - #[test] - fn stack_sanitization_v2() { - let mut stack = create_aligned_vec(4, 16); - let result = unsafe { secure_crypto_call_heap(|| {dummy_fn()}, &mut stack)}; - assert_eq!(result.1, 12345); - // results in segmentation fault - // assert_eq!(unsafe {*result.0}, 42); - } -} \ No newline at end of file diff --git a/stack_sanitizer/Cargo.toml b/zeroize_stack/Cargo.toml similarity index 76% rename from stack_sanitizer/Cargo.toml rename to zeroize_stack/Cargo.toml index 7d5ffc54..8b344c26 100644 --- a/stack_sanitizer/Cargo.toml +++ b/zeroize_stack/Cargo.toml @@ -1,9 +1,9 @@ [package] -name = "stack_sanitizer" +name = "zeroize_stack" version = "0.1.0" description = """ -Securely sanitize the stack with a simple function built on -the Portable Stack Manipulation (psm) crate. +Securely zeroize the stack with a simple function built on +the Portable Stack Manipulation (psm) crate and zeroize crate. """ authors = ["The RustCrypto Project Developers"] license = "Apache-2.0 OR MIT" @@ -16,7 +16,7 @@ edition = "2024" rust-version = "1.85" [dependencies] -psm = { version = "0.1.26", optional = true } +psm = "0.1.26" zeroize = { version = "1.0" } [features] diff --git a/stack_sanitizer/README.md b/zeroize_stack/README.md similarity index 91% rename from stack_sanitizer/README.md rename to zeroize_stack/README.md index 1cd873cc..0f4e0f7e 100644 --- a/stack_sanitizer/README.md +++ b/zeroize_stack/README.md @@ -1,4 +1,4 @@ -# [RustCrypto]: stack_sanitizer +# [RustCrypto]: zeroize_stack [![Crate][crate-image]][crate-link] [![Docs][docs-image]][docs-link] @@ -21,9 +21,9 @@ as make extra copies of data on the stack that cannot be easily zeroed. 
That's what this crate is for. This crate isn't about tricks: it uses [psm::on_stack] to run a function on -a portable stack, and then uses [zeroize] to zero the stack. `psm` implements -all of the assembly for several different architectures, whereas the [zeroize] -segment was implemented in pure Rust. +a portable stack, and then uses [zeroize] to zero that stack. `psm` implements +all of the assembly for several different architectures, and the [zeroize] +portion of the task was implemented in pure Rust. - `#![no_std]` i.e. **embedded-friendly**! (`alloc` is required) - No functionality besides securely zeroing the a function's stack usage! diff --git a/stack_sanitizer/src/lib.rs b/zeroize_stack/src/lib.rs similarity index 96% rename from stack_sanitizer/src/lib.rs rename to zeroize_stack/src/lib.rs index 6be26be1..591a58dd 100644 --- a/stack_sanitizer/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -1,4 +1,4 @@ -//! # stack_bleach +//! # zeroize_stack //! //! A crate for sanitizing stack memory after sensitive operations—sometimes referred to as _Stack Bleaching_. //! @@ -47,9 +47,9 @@ extern crate alloc; use alloc::{vec, vec::Vec}; -/// Executes a function/closure and clears the function's stack frames by using +/// Executes a function/closure and clears the function's stack by using /// preallocated space on the heap as the function's stack, and then zeroing -/// that allocated data once the code has ran. +/// that allocated space once the code has ran. /// /// This function does not clear the CPU registers. /// @@ -58,7 +58,7 @@ use alloc::{vec, vec::Vec}; /// * `stack_size_kb` - how large the stack will be. `psm` recommends at least /// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, /// some architectures might consume more memory in the stack, such as SPARC. -/// * `crypto_fn` - the code to run while on separate stack. +/// * `crypto_fn` - the code to run while on the separate stack. 
/// /// # Safety /// diff --git a/stack_sanitizer/tests/stack_sanitization.rs b/zeroize_stack/tests/zeroize_stack.rs similarity index 71% rename from stack_sanitizer/tests/stack_sanitization.rs rename to zeroize_stack/tests/zeroize_stack.rs index 50f96a22..dd9e6548 100644 --- a/stack_sanitizer/tests/stack_sanitization.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -1,8 +1,9 @@ //! Stack sanitization integration tests mod stack_sanitization_tests { - use stack_sanitizer::exec_on_sanitized_stack; + use zeroize_stack::exec_on_sanitized_stack; + #[inline(never)] fn dummy_fn() -> (*const u8, u64) { let temporary_data = 42; let ptr = temporary_data as *const u8; @@ -13,7 +14,8 @@ mod stack_sanitization_tests { fn stack_sanitization_v2() { let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn()) }; assert_eq!(result.1, 12345); - // results in segmentation fault + // results in segmentation fault, which is somewhat normal... just wanted + // to try it // assert_eq!(unsafe {*result.0}, 42); } } From e791a4b558d665ce569b3438cbd58daf0771fc2d Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sat, 13 Sep 2025 21:28:47 -0500 Subject: [PATCH 06/20] revised structure to allow miri to run, but panic --- zeroize_stack/src/lib.rs | 89 ++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 40 deletions(-) diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 591a58dd..731c2acb 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -31,7 +31,7 @@ //! caller must ensure: //! - The stack size provided is large enough for the closure to run with. //! - The closure does not unwind or return control flow by any means other than -//! directly returning. +//! directly returning. //! //! ## Use Cases //! @@ -39,52 +39,61 @@ //! - Secure enclave transitions //! 
- Sanitizing temporary buffers in high-assurance systems -use psm::on_stack; - use zeroize::Zeroize; extern crate alloc; use alloc::{vec, vec::Vec}; -/// Executes a function/closure and clears the function's stack by using -/// preallocated space on the heap as the function's stack, and then zeroing -/// that allocated space once the code has ran. -/// -/// This function does not clear the CPU registers. -/// -/// # Arguments -/// -/// * `stack_size_kb` - how large the stack will be. `psm` recommends at least -/// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, -/// some architectures might consume more memory in the stack, such as SPARC. -/// * `crypto_fn` - the code to run while on the separate stack. -/// -/// # Safety -/// -/// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register -/// reuse and stack layout changes. -/// * The stack needs to be large enough for `crypto_fn()` to execute without -/// overflow. -/// * `crypto_fn()` must not unwind or return control flow by any other means -/// than by directly returning. -pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> R -where - F: FnOnce() -> R, -{ - assert!( - stack_size_kb * 1024 > 0, - "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" - ); - let mut stack = create_aligned_vec(stack_size_kb as usize, core::mem::align_of::()); - let res = unsafe { - on_stack(stack.as_mut_ptr(), stack.len(), || { - let res = crypto_fn(); +psm::psm_stack_manipulation! { + yes { + /// Executes a function/closure and clears the function's stack by using + /// preallocated space on the heap as the function's stack, and then zeroing + /// that allocated space once the code has ran. + /// + /// This function does not clear the CPU registers. + /// + /// # Arguments + /// + /// * `stack_size_kb` - how large the stack will be. `psm` recommends at least + /// `4 KB` of stack size, but the total size cannot overflow an `isize`. 
Also, + /// some architectures might consume more memory in the stack, such as SPARC. + /// * `crypto_fn` - the code to run while on the separate stack. + /// + /// # Safety + /// + /// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register + /// reuse and stack layout changes. + /// * The stack needs to be large enough for `crypto_fn()` to execute without + /// overflow. + /// * `crypto_fn()` must not unwind or return control flow by any other means + /// than by directly returning. + pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> R + where + F: FnOnce() -> R, + { + assert!( + stack_size_kb * 1024 > 0, + "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" + ); + let mut stack = create_aligned_vec(stack_size_kb as usize, core::mem::align_of::()); + let res = unsafe { + psm::on_stack(stack.as_mut_ptr(), stack.len(), || { + crypto_fn() + }) + }; + stack.zeroize(); res - }) - }; - stack.zeroize(); - res + } + } + no { + pub unsafe fn exec_on_sanitized_stack(_stack_size_kb: isize, _crypto_fn: F) -> R + where + F: FnOnce() -> R, + { + panic!("Stack manipulation not possible on this platform") + } + } } /// Round up to the nearest multiple of alignment From b02d547ed6f7aad516ffeb6dc48021bbdc65e7ff Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 16 Sep 2025 20:43:37 -0500 Subject: [PATCH 07/20] added std feature to enforce panic/unwind safety; moved inline(never) comment to debugging section; make no-panic feature for no branch of psm macro? handle closure panic with match? 
--- zeroize_stack/Cargo.toml | 2 + zeroize_stack/src/lib.rs | 79 ++++++++++++++++++++++++---- zeroize_stack/tests/zeroize_stack.rs | 2 +- 3 files changed, 72 insertions(+), 11 deletions(-) diff --git a/zeroize_stack/Cargo.toml b/zeroize_stack/Cargo.toml index 8b344c26..ada2acf0 100644 --- a/zeroize_stack/Cargo.toml +++ b/zeroize_stack/Cargo.toml @@ -20,6 +20,8 @@ psm = "0.1.26" zeroize = { version = "1.0" } [features] +default = ["std"] +std = [] [package.metadata.docs.rs] all-features = true diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 731c2acb..524bf62a 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -1,3 +1,11 @@ +#![no_std] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg", + html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg" +)] +#![warn(missing_docs, unused_qualifications)] + //! # zeroize_stack //! //! A crate for sanitizing stack memory after sensitive operations—sometimes referred to as _Stack Bleaching_. @@ -31,8 +39,16 @@ //! caller must ensure: //! - The stack size provided is large enough for the closure to run with. //! - The closure does not unwind or return control flow by any means other than -//! directly returning. +//! directly returning. `std` users do not need to worry about this due to +//! the existence of `catch_unwind`. //! +//! ## `nostd` Support +//! +//! This crate is compatible with `nostd` environments, but it is less safe +//! in the event that your stack-switched stack panics. Panicking on a separate +//! stack can cause undefined behavior (UB), but if it can be caught with +//! `std::panic::catch_unwind`, that aspect of the safety should be more safe. +//! //! ## Use Cases //! //! 
- Cryptographic routines @@ -45,6 +61,27 @@ extern crate alloc; use alloc::{vec, vec::Vec}; +#[cfg(feature = "std")] +extern crate std; +#[cfg(feature = "std")] +use core::any::Any; +#[cfg(feature = "std")] +use std::{ + boxed::Box, + panic::catch_unwind, +}; +#[cfg(feature = "std")] +type StackSwitchResult = Result>; +#[cfg(not(feature = "std"))] +type StackSwitchResult = T; + +use core::panic::{AssertUnwindSafe, UnwindSafe}; + +#[derive(Debug)] +enum Error { + StackPanicked +} + psm::psm_stack_manipulation! { yes { /// Executes a function/closure and clears the function's stack by using @@ -60,26 +97,48 @@ psm::psm_stack_manipulation! { /// some architectures might consume more memory in the stack, such as SPARC. /// * `crypto_fn` - the code to run while on the separate stack. /// + /// ## Panicking + /// + /// This function panics when `psm` detects that `on_stack` is unavailable. + /// + /// ## Errors + /// + /// With the `std` feature enabled, this function will result in an error when + /// the closure panics. You may want to log these errors securely, privately, + /// as cryptography panics could be a little revealing if displayed to + /// the end user. + /// + /// ## Debugging + /// + /// Using `#[inline(never)]` on the closure's function definition could + /// make it easier to debug as the function should show up. + /// /// # Safety - /// - /// * `crypto_fn` should be marked as `#[inline(never)]`, preventing register - /// reuse and stack layout changes. + /// /// * The stack needs to be large enough for `crypto_fn()` to execute without /// overflow. - /// * `crypto_fn()` must not unwind or return control flow by any other means + /// * `nostd` only: `crypto_fn()` must not unwind or return control flow by any other means /// than by directly returning. 
- pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> R + pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> StackSwitchResult where - F: FnOnce() -> R, + F: FnOnce() -> R + UnwindSafe, { assert!( stack_size_kb * 1024 > 0, "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" ); - let mut stack = create_aligned_vec(stack_size_kb as usize, core::mem::align_of::()); + let mut stack = create_aligned_vec(stack_size_kb as usize, align_of::()); + let res = unsafe { psm::on_stack(stack.as_mut_ptr(), stack.len(), || { - crypto_fn() + #[cfg(not(feature = "std"))] + { + crypto_fn() + } + #[cfg(feature = "std")] + { + catch_unwind(AssertUnwindSafe(crypto_fn)) + } }) }; stack.zeroize(); @@ -87,7 +146,7 @@ psm::psm_stack_manipulation! { } } no { - pub unsafe fn exec_on_sanitized_stack(_stack_size_kb: isize, _crypto_fn: F) -> R + pub unsafe fn exec_on_sanitized_stack(_stack_size_kb: isize, _crypto_fn: F) -> StackSwitchResult where F: FnOnce() -> R, { diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs index dd9e6548..2dfd22b0 100644 --- a/zeroize_stack/tests/zeroize_stack.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -13,7 +13,7 @@ mod stack_sanitization_tests { #[test] fn stack_sanitization_v2() { let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn()) }; - assert_eq!(result.1, 12345); + assert_eq!(result.unwrap().1, 12345); // results in segmentation fault, which is somewhat normal... 
just wanted // to try it // assert_eq!(unsafe {*result.0}, 42); From 6febd9f1ddcbd7d6b9911fb0697ad443d6e7b4c0 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 16 Sep 2025 20:44:10 -0500 Subject: [PATCH 08/20] fmt --- zeroize_stack/src/lib.rs | 41 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 524bf62a..d34e865c 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -39,16 +39,16 @@ //! caller must ensure: //! - The stack size provided is large enough for the closure to run with. //! - The closure does not unwind or return control flow by any means other than -//! directly returning. `std` users do not need to worry about this due to +//! directly returning. `std` users do not need to worry about this due to //! the existence of `catch_unwind`. //! //! ## `nostd` Support -//! +//! //! This crate is compatible with `nostd` environments, but it is less safe -//! in the event that your stack-switched stack panics. Panicking on a separate -//! stack can cause undefined behavior (UB), but if it can be caught with +//! in the event that your stack-switched stack panics. Panicking on a separate +//! stack can cause undefined behavior (UB), but if it can be caught with //! `std::panic::catch_unwind`, that aspect of the safety should be more safe. -//! +//! //! ## Use Cases //! //! - Cryptographic routines @@ -66,10 +66,7 @@ extern crate std; #[cfg(feature = "std")] use core::any::Any; #[cfg(feature = "std")] -use std::{ - boxed::Box, - panic::catch_unwind, -}; +use std::{boxed::Box, panic::catch_unwind}; #[cfg(feature = "std")] type StackSwitchResult = Result>; #[cfg(not(feature = "std"))] @@ -79,7 +76,7 @@ use core::panic::{AssertUnwindSafe, UnwindSafe}; #[derive(Debug)] enum Error { - StackPanicked + StackPanicked, } psm::psm_stack_manipulation! { @@ -98,23 +95,23 @@ psm::psm_stack_manipulation! 
{ /// * `crypto_fn` - the code to run while on the separate stack. /// /// ## Panicking - /// + /// /// This function panics when `psm` detects that `on_stack` is unavailable. - /// + /// /// ## Errors - /// - /// With the `std` feature enabled, this function will result in an error when - /// the closure panics. You may want to log these errors securely, privately, - /// as cryptography panics could be a little revealing if displayed to + /// + /// With the `std` feature enabled, this function will result in an error when + /// the closure panics. You may want to log these errors securely, privately, + /// as cryptography panics could be a little revealing if displayed to /// the end user. - /// + /// /// ## Debugging - /// - /// Using `#[inline(never)]` on the closure's function definition could + /// + /// Using `#[inline(never)]` on the closure's function definition could /// make it easier to debug as the function should show up. - /// + /// /// # Safety - /// + /// /// * The stack needs to be large enough for `crypto_fn()` to execute without /// overflow. /// * `nostd` only: `crypto_fn()` must not unwind or return control flow by any other means @@ -128,7 +125,7 @@ psm::psm_stack_manipulation! { "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" ); let mut stack = create_aligned_vec(stack_size_kb as usize, align_of::()); - + let res = unsafe { psm::on_stack(stack.as_mut_ptr(), stack.len(), || { #[cfg(not(feature = "std"))] From d4447b97f012d6da6b291a9cc0fbadc801573bb6 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Wed, 17 Sep 2025 09:51:30 -0500 Subject: [PATCH 09/20] minor doc revisions --- zeroize_stack/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index d34e865c..49968e62 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -107,8 +107,8 @@ psm::psm_stack_manipulation! 
{ /// /// ## Debugging /// - /// Using `#[inline(never)]` on the closure's function definition could - /// make it easier to debug as the function should show up. + /// Using `#[inline(never)]` on the closure's function definition(s) could + /// make it easier to debug as the function(s) should show up in backtraces. /// /// # Safety /// From b4ea722d3dff666f3db78f68d54137dcafc0cb02 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Wed, 17 Sep 2025 10:26:33 -0500 Subject: [PATCH 10/20] minor doc revisions, fixed clippy; next steps: support async closures with futures::executor::block_on(f()), add asm alternative(?), handle unwind better(?), use stacker crate to handle stack size management(?) or at least use their code(?) --- zeroize_stack/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 49968e62..5bc57999 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -74,11 +74,6 @@ type StackSwitchResult = T; use core::panic::{AssertUnwindSafe, UnwindSafe}; -#[derive(Debug)] -enum Error { - StackPanicked, -} - psm::psm_stack_manipulation! { yes { /// Executes a function/closure and clears the function's stack by using @@ -110,6 +105,11 @@ psm::psm_stack_manipulation! { /// Using `#[inline(never)]` on the closure's function definition(s) could /// make it easier to debug as the function(s) should show up in backtraces. /// + /// # Returns + /// + /// * If `std` is enabled, this returns a `Result>` + /// * Otherwise, this returns `R` directly; no panics are caught. 
+ /// /// # Safety /// /// * The stack needs to be large enough for `crypto_fn()` to execute without From 3b8137614e0d1869038a0ec7331ec8ce6388abc5 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Wed, 17 Sep 2025 11:01:31 -0500 Subject: [PATCH 11/20] added TODO.md --- zeroize_stack/TODO.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 zeroize_stack/TODO.md diff --git a/zeroize_stack/TODO.md b/zeroize_stack/TODO.md new file mode 100644 index 00000000..ce4681bb --- /dev/null +++ b/zeroize_stack/TODO.md @@ -0,0 +1,32 @@ +# TODO: + +* Add support for async closures, possibly using a macro to define the functions if necessary. Use `futures::executor::block_on(f())` to poll the entire future completion inside the stack switched context, and avoid `.await` that yields control outside of the `on_stack()` boundary. Something like: + +```rust +pub unsafe fn exec_async_on_sanitized_stack( + stack: &mut [u8], + f: F, +) -> Result> +where + F: FnOnce() -> Fut + UnwindSafe, + Fut: Future, +{ + let mut result = None; + + on_stack(stack, || { + result = Some(catch_unwind(AssertUnwindSafe(|| { + // Block on the future inside the heap stack + futures::executor::block_on(f()) + }))); + }); + + result.expect("Closure did not run") +} +``` + +* Handle unwinds better: currently we return a `Result>`. The error case is a little bit tricky to handle, as dropping the error could cause a panic. The program should either panic, or return the panic payload's message. + +* Use stacker crate to handle stack size management: if I read some of the `stacker` docs correctly, that crate should be able to extend the size of the stack when it is about to overflow. If that is correct, we could use their techniques to allocate a new stack and zeroize the old one whenever our allocated stack is about to overflow, eliminating the primary remaining `# Safety` comment. 
Note: we may not be able to zeroize the old stack immediately as the stack switching process likely attempts to return to the old stack once execution completes; we might have to wait until execution completes before zeroizing all heap-stacks. + +* Add an `asm!` alternative method for stack bleaching. In theory, it would be better to use `asm!` as we would not need to worry about the size of the allocated switched stack, and it would keep all of the code running on the actual stack and not the heap, possibly preserving performance. The problem with this is that using pointers from `asm!` and rust code to zero the space between the pointers results in segmentation faults on `x86_64`. + * when testing this, assert that the two pointers are not equal to each other and not null. \ No newline at end of file From d66530086f73c219981348a0b1b4aeb6dd4bbc0d Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sun, 21 Sep 2025 12:11:18 -0500 Subject: [PATCH 12/20] update TODO.md, next step will be allowing heap-stack reuse --- zeroize_stack/TODO.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/zeroize_stack/TODO.md b/zeroize_stack/TODO.md index ce4681bb..2867d31f 100644 --- a/zeroize_stack/TODO.md +++ b/zeroize_stack/TODO.md @@ -1,5 +1,7 @@ # TODO: +## Likely impossible and/or unsafe + * Add support for async closures, possibly using a macro to define the functions if necessary. Use `futures::executor::block_on(f())` to poll the entire future completion inside the stack switched context, and avoid `.await` that yields control outside of the `on_stack()` boundary. Something like: ```rust @@ -24,9 +26,21 @@ where } ``` +Copilot provided that code, but Gemini says that after the future is awaited, there will be no way for the program to know which stack to return to. Also, there is an open issue regarding async closures in `stacker` that has not been resolved after 7 months. https://github.com/rust-lang/stacker/issues/111 + +## Safe + +* Allow stack reuse. 
More efficient to zero one stack shared by multiple functions. `impl Drop` and `ZeroizeOnDrop` and make the main public function only accept a mutable `HeapStack` struct, and allow for the stack to get zeroed on drop. + +* Panic when the OS is `hermit` or it is running on `wasm32` or `wasm64`, as their stacks don't behave the same as all of the others. + * Handle unwinds better: currently we return a `Result>`. The error case is a little bit tricky to handle, as dropping the error could cause a panic. The program should either panic, or return the panic payload's message. +## Would require a PR to `stacker` to zero the allocated stack on drop + * Use stacker crate to handle stack size management: if I read some of the `stacker` docs correctly, that crate should be able to extend the size of the stack when it is about to overflow. If that is correct, we could use their techniques to allocate a new stack and zeroize the old one whenever our allocated stack is about to overflow, eliminating the primary remaining `# Safety` comment. Note: we may not be able to zeroize the old stack immediately as the stack switching process likely attempts to return to the old stack once execution completes; we might have to wait until execution completes before zeroizing all heap-stacks. +## Requires `asm!` + * Add an `asm!` alternative method for stack bleaching. In theory, it would be better to use `asm!` as we would not need to worry about the size of the allocated switched stack, and it would keep all of the code running on the actual stack and not the heap, possibly preserving performance. The problem with this is that using pointers from `asm!` and rust code to zero the space between the pointers results in segmentation faults on `x86_64`. * when testing this, assert that the two pointers are not equal to each other and not null. 
\ No newline at end of file From ba7f0f4eec0c0137810ba5e0171ee3aa680fff6c Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Sun, 21 Sep 2025 18:20:30 -0500 Subject: [PATCH 13/20] added AlignedHeapStack to reuse stack space in sequential calls; see marked out TODO task for details; next up: better panics --- zeroize_stack/TODO.md | 2 - zeroize_stack/src/lib.rs | 77 ++++++++++++++++++++++------ zeroize_stack/tests/zeroize_stack.rs | 29 ++++++++++- 3 files changed, 89 insertions(+), 19 deletions(-) diff --git a/zeroize_stack/TODO.md b/zeroize_stack/TODO.md index 2867d31f..23360bfd 100644 --- a/zeroize_stack/TODO.md +++ b/zeroize_stack/TODO.md @@ -30,8 +30,6 @@ Copilot provided that code, but Gemini says that after the future is awaited, th ## Safe -* Allow stack reuse. More efficient to zero one stack shared by multiple functions. `impl Drop` and `ZeroizeOnDrop` and make the main public function only accept a mutable `HeapStack` struct, and allow for the stack to get zeroed on drop. - * Panic when the OS is `hermit` or it is running on `wasm32` or `wasm64`, as their stacks don't behave the same as all of the others. * Handle unwinds better: currently we return a `Result>`. The error case is a little bit tricky to handle, as dropping the error could cause a panic. The program should either panic, or return the panic payload's message. diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 5bc57999..2d4bdf03 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -55,7 +55,7 @@ //! - Secure enclave transitions //! - Sanitizing temporary buffers in high-assurance systems -use zeroize::Zeroize; +use zeroize::{Zeroize, ZeroizeOnDrop}; extern crate alloc; @@ -74,6 +74,55 @@ type StackSwitchResult = T; use core::panic::{AssertUnwindSafe, UnwindSafe}; +/// An aligned HeapStack. Aligned to the alignment of a `u128`, and aligned using +/// safe code instead of manual alloc calls. 
This implements `ZeroizeOnDrop` and +/// contains a lock flag to prevent the stack from being reused while it is being +/// used. +pub struct AlignedHeapStack { + locked: bool, + stack: Vec, +} + +impl AlignedHeapStack { + /// Creates a new `AlignedHeapStack`. `psm` recommends using at least `4 KB` + /// of stack space. + /// + /// # Panics + /// + /// This function panics when `size_kb * 1024` overflows `isize`. + pub fn new(size_kb: usize) -> Self { + assert!( + size_kb as isize * 1024 > 0, + "size_kb must be positive and must not overflow isize when expanded to number of bytes instead of kb" + ); + let result = Self { + locked: false, + stack: create_aligned_vec(size_kb, align_of::()), + }; + // these may be redundant but I just want to be sure that the alignment doesn't + // change somehow + debug_assert_eq!(result.stack.as_ptr() as usize % align_of::(), 0); + debug_assert_eq!(result.stack.len() % align_of::(), 0); + result + } + + fn is_locked(&self) -> bool { + self.locked + } + + fn set_lock(&mut self, locked: bool) { + self.locked = locked; + } +} + +impl Drop for AlignedHeapStack { + fn drop(&mut self) { + self.stack.zeroize(); + } +} + +impl ZeroizeOnDrop for AlignedHeapStack {} + psm::psm_stack_manipulation! { yes { /// Executes a function/closure and clears the function's stack by using @@ -84,9 +133,11 @@ psm::psm_stack_manipulation! { /// /// # Arguments /// - /// * `stack_size_kb` - how large the stack will be. `psm` recommends at least - /// `4 KB` of stack size, but the total size cannot overflow an `isize`. Also, - /// some architectures might consume more memory in the stack, such as SPARC. + /// * `aligned_heap_stack` - the heap-based aligned region of memory to + /// be used as the stack. `psm` recommends at least `4 KB` of stack + /// space, but the total size cannot overflow an `isize`. Also, + /// some architectures might consume more memory in the stack, such as + /// SPARC. /// * `crypto_fn` - the code to run while on the separate stack. 
/// /// ## Panicking @@ -106,28 +157,24 @@ psm::psm_stack_manipulation! { /// make it easier to debug as the function(s) should show up in backtraces. /// /// # Returns - /// + /// /// * If `std` is enabled, this returns a `Result>` /// * Otherwise, this returns `R` directly; no panics are caught. - /// + /// /// # Safety /// /// * The stack needs to be large enough for `crypto_fn()` to execute without /// overflow. /// * `nostd` only: `crypto_fn()` must not unwind or return control flow by any other means /// than by directly returning. - pub unsafe fn exec_on_sanitized_stack(stack_size_kb: isize, crypto_fn: F) -> StackSwitchResult + pub unsafe fn exec_on_sanitized_stack(aligned_heap_stack: &mut AlignedHeapStack, crypto_fn: F) -> StackSwitchResult where F: FnOnce() -> R + UnwindSafe, { - assert!( - stack_size_kb * 1024 > 0, - "Stack size must be greater than 0 kb and `* 1024` must not overflow `isize`" - ); - let mut stack = create_aligned_vec(stack_size_kb as usize, align_of::()); - + assert!(!aligned_heap_stack.is_locked(), "AlignedHeapStack was locked. You must not use it while it is being used already!"); + aligned_heap_stack.set_lock(true); let res = unsafe { - psm::on_stack(stack.as_mut_ptr(), stack.len(), || { + psm::on_stack(aligned_heap_stack.stack.as_mut_ptr(), aligned_heap_stack.stack.len(), || { #[cfg(not(feature = "std"))] { crypto_fn() @@ -138,7 +185,7 @@ psm::psm_stack_manipulation! { } }) }; - stack.zeroize(); + aligned_heap_stack.set_lock(false); res } } diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs index 2dfd22b0..4f515224 100644 --- a/zeroize_stack/tests/zeroize_stack.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -1,7 +1,9 @@ //! 
Stack sanitization integration tests mod stack_sanitization_tests { - use zeroize_stack::exec_on_sanitized_stack; + use std::panic::AssertUnwindSafe; + + use zeroize_stack::{AlignedHeapStack, exec_on_sanitized_stack}; #[inline(never)] fn dummy_fn() -> (*const u8, u64) { @@ -12,10 +14,33 @@ mod stack_sanitization_tests { #[test] fn stack_sanitization_v2() { - let result = unsafe { exec_on_sanitized_stack(4, || dummy_fn()) }; + let mut heap_stack = AlignedHeapStack::new(4); + let result = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; assert_eq!(result.unwrap().1, 12345); // results in segmentation fault, which is somewhat normal... just wanted // to try it // assert_eq!(unsafe {*result.0}, 42); } + + #[test] + fn allow_stack_reuse_between_calls() { + let mut heap_stack = AlignedHeapStack::new(4); + let result_1 = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; + assert!(result_1.is_ok()); + assert_eq!(result_1.unwrap().1, 12345); + let result_2 = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; + assert!(result_2.is_ok()); + assert_eq!(result_2.unwrap().1, 12345); + } + + fn non_returning_function(v: &mut u32) { + *v += 5; + } + #[test] + fn non_returning_function_test() { + let mut heap_stack = AlignedHeapStack::new(4); + let mut v = 0; + unsafe { exec_on_sanitized_stack(&mut heap_stack, AssertUnwindSafe(|| non_returning_function(&mut v)))}.unwrap(); + assert_eq!(v, 5); + } } From b820c3a753bf1362837f265a74afcdd54af9e40e Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Wed, 24 Sep 2025 09:05:23 -0500 Subject: [PATCH 14/20] update TODO and some fmt looking changes --- zeroize_stack/TODO.md | 7 +++++-- zeroize_stack/src/lib.rs | 14 +++++++------- zeroize_stack/tests/zeroize_stack.rs | 8 +++++++- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/zeroize_stack/TODO.md b/zeroize_stack/TODO.md index 23360bfd..52290f25 100644 --- a/zeroize_stack/TODO.md +++ b/zeroize_stack/TODO.md @@ -30,10 +30,13 
@@ Copilot provided that code, but Gemini says that after the future is awaited, th ## Safe -* Panic when the OS is `hermit` or it is running on `wasm32` or `wasm64`, as their stacks don't behave the same as all of the others. - * Handle unwinds better: currently we return a `Result>`. The error case is a little bit tricky to handle, as dropping the error could cause a panic. The program should either panic, or return the panic payload's message. +* Either: + * Panic when the OS is `hermit` or it is running on `wasm32` or `wasm64`, as their stacks don't behave the same as all of the others. + * Run the closure without `psm::on_stack` and generate a compiler warning stating that the target's stack layout is not supported with basic stack switching. + * Implement different types of `AlignedHeapStack` to cover `wasm32` and `hermit` as performed in the `stacker` crate. + ## Would require a PR to `stacker` to zero the allocated stack on drop * Use stacker crate to handle stack size management: if I read some of the `stacker` docs correctly, that crate should be able to extend the size of the stack when it is about to overflow. If that is correct, we could use their techniques to allocate a new stack and zeroize the old one whenever our allocated stack is about to overflow, eliminating the primary remaining `# Safety` comment. Note: we may not be able to zeroize the old stack immediately as the stack switching process likely attempts to return to the old stack once execution completes; we might have to wait until execution completes before zeroizing all heap-stacks. diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 2d4bdf03..3e5870d5 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -84,11 +84,11 @@ pub struct AlignedHeapStack { } impl AlignedHeapStack { - /// Creates a new `AlignedHeapStack`. `psm` recommends using at least `4 KB` + /// Creates a new `AlignedHeapStack`. `psm` recommends using at least `4 KB` /// of stack space. 
- /// + /// /// # Panics - /// + /// /// This function panics when `size_kb * 1024` overflows `isize`. pub fn new(size_kb: usize) -> Self { assert!( @@ -99,7 +99,7 @@ impl AlignedHeapStack { locked: false, stack: create_aligned_vec(size_kb, align_of::()), }; - // these may be redundant but I just want to be sure that the alignment doesn't + // these may be redundant but I just want to be sure that the alignment doesn't // change somehow debug_assert_eq!(result.stack.as_ptr() as usize % align_of::(), 0); debug_assert_eq!(result.stack.len() % align_of::(), 0); @@ -133,10 +133,10 @@ psm::psm_stack_manipulation! { /// /// # Arguments /// - /// * `aligned_heap_stack` - the heap-based aligned region of memory to - /// be used as the stack. `psm` recommends at least `4 KB` of stack + /// * `aligned_heap_stack` - the heap-based aligned region of memory to + /// be used as the stack. `psm` recommends at least `4 KB` of stack /// space, but the total size cannot overflow an `isize`. Also, - /// some architectures might consume more memory in the stack, such as + /// some architectures might consume more memory in the stack, such as /// SPARC. /// * `crypto_fn` - the code to run while on the separate stack. 
/// diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs index 4f515224..d616db2d 100644 --- a/zeroize_stack/tests/zeroize_stack.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -40,7 +40,13 @@ mod stack_sanitization_tests { fn non_returning_function_test() { let mut heap_stack = AlignedHeapStack::new(4); let mut v = 0; - unsafe { exec_on_sanitized_stack(&mut heap_stack, AssertUnwindSafe(|| non_returning_function(&mut v)))}.unwrap(); + unsafe { + exec_on_sanitized_stack( + &mut heap_stack, + AssertUnwindSafe(|| non_returning_function(&mut v)), + ) + } + .unwrap(); assert_eq!(v, 5); } } From 5e8f7c21bc08a96aed59ec0bf232715b1468f0d2 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Mon, 22 Dec 2025 15:53:31 -0600 Subject: [PATCH 15/20] added raw code from stacker --- zeroize_stack/Cargo.toml | 4 +- zeroize_stack/src/heap/alloc.rs | 47 ++++++++++++++ zeroize_stack/src/heap/mmap.rs | 105 ++++++++++++++++++++++++++++++++ zeroize_stack/src/heap/mod.rs | 17 ++++++ zeroize_stack/src/lib.rs | 3 + 5 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 zeroize_stack/src/heap/alloc.rs create mode 100644 zeroize_stack/src/heap/mmap.rs create mode 100644 zeroize_stack/src/heap/mod.rs diff --git a/zeroize_stack/Cargo.toml b/zeroize_stack/Cargo.toml index ada2acf0..39951295 100644 --- a/zeroize_stack/Cargo.toml +++ b/zeroize_stack/Cargo.toml @@ -20,7 +20,9 @@ psm = "0.1.26" zeroize = { version = "1.0" } [features] -default = ["std"] +default = ["heap", "stack", "std"] +heap = [] +stack = [] std = [] [package.metadata.docs.rs] diff --git a/zeroize_stack/src/heap/alloc.rs b/zeroize_stack/src/heap/alloc.rs new file mode 100644 index 00000000..d1585347 --- /dev/null +++ b/zeroize_stack/src/heap/alloc.rs @@ -0,0 +1,47 @@ +use crate::{get_stack_limit, set_stack_limit}; + +pub struct StackRestoreGuard { + new_stack: *mut u8, + stack_bytes: usize, + old_stack_limit: Option, +} + +const ALIGNMENT: usize = 16; + +impl StackRestoreGuard { + 
pub fn new(stack_bytes: usize) -> StackRestoreGuard { + // On these platforms we do not use stack guards. this is very unfortunate, + // but there is not much we can do about it without OS support. + // We simply allocate the requested size from the global allocator with a suitable + // alignment. + let stack_bytes = stack_bytes + .checked_add(ALIGNMENT - 1) + .expect("unreasonably large stack requested") + / ALIGNMENT + * ALIGNMENT; + let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap(); + let ptr = unsafe { std::alloc::alloc(layout) }; + assert!(!ptr.is_null(), "unable to allocate stack"); + StackRestoreGuard { + new_stack: ptr, + stack_bytes, + old_stack_limit: get_stack_limit(), + } + } + + pub fn stack_area(&self) -> (*mut u8, usize) { + (self.new_stack, self.stack_bytes) + } +} + +impl Drop for StackRestoreGuard { + fn drop(&mut self) { + unsafe { + std::alloc::dealloc( + self.new_stack, + std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT), + ); + } + set_stack_limit(self.old_stack_limit); + } +} \ No newline at end of file diff --git a/zeroize_stack/src/heap/mmap.rs b/zeroize_stack/src/heap/mmap.rs new file mode 100644 index 00000000..50afc151 --- /dev/null +++ b/zeroize_stack/src/heap/mmap.rs @@ -0,0 +1,105 @@ +use crate::{get_stack_limit, set_stack_limit}; + +pub struct StackRestoreGuard { + mapping: *mut u8, + size_with_guard: usize, + page_size: usize, + old_stack_limit: Option, +} + +impl StackRestoreGuard { + pub fn new(requested_size: usize) -> StackRestoreGuard { + // For maximum portability we want to produce a stack that is aligned to a page and has + // a size that’s a multiple of page size. It is natural to use mmap to allocate + // these pages. Furthermore, we want to allocate two extras pages for the stack guard. + // To achieve that we do our calculations in number of pages and convert to bytes last. 
+ let page_size = page_size(); + let requested_pages = requested_size + .checked_add(page_size - 1) + .expect("unreasonably large stack requested") + / page_size; + let page_count_with_guard = std::cmp::max(1, requested_pages) + 2; + let size_with_guard = page_count_with_guard + .checked_mul(page_size) + .expect("unreasonably large stack requested"); + + unsafe { + let new_stack = libc::mmap( + std::ptr::null_mut(), + size_with_guard, + libc::PROT_NONE, + libc::MAP_PRIVATE | libc::MAP_ANON, + -1, // Some implementations assert fd = -1 if MAP_ANON is specified + 0, + ); + assert_ne!( + new_stack, + libc::MAP_FAILED, + "mmap failed to allocate stack: {}", + std::io::Error::last_os_error() + ); + let guard = StackRestoreGuard { + mapping: new_stack as *mut u8, + page_size, + size_with_guard, + old_stack_limit: get_stack_limit(), + }; + // We leave two guard pages without read/write access in our allocation. + // There is one guard page below the stack and another above it. + let above_guard_page = new_stack.add(page_size); + #[cfg(not(target_os = "openbsd"))] + let result = libc::mprotect( + above_guard_page, + size_with_guard - 2 * page_size, + libc::PROT_READ | libc::PROT_WRITE, + ); + #[cfg(target_os = "openbsd")] + let result = if libc::mmap( + above_guard_page, + size_with_guard - 2 * page_size, + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK, + -1, + 0, + ) == above_guard_page + { + 0 + } else { + -1 + }; + assert_ne!( + result, + -1, + "mprotect/mmap failed: {}", + std::io::Error::last_os_error() + ); + guard + } + } + + // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79. 
+ pub fn stack_area(&self) -> (*mut u8, usize) { + unsafe { + ( + self.mapping.add(self.page_size), + self.size_with_guard - self.page_size, + ) + } + } +} + +impl Drop for StackRestoreGuard { + fn drop(&mut self) { + unsafe { + // FIXME: check the error code and decide what to do with it. + // Perhaps a debug_assertion? + libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard); + } + set_stack_limit(self.old_stack_limit); + } +} + +fn page_size() -> usize { + // FIXME: consider caching the page size. + unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize } +} \ No newline at end of file diff --git a/zeroize_stack/src/heap/mod.rs b/zeroize_stack/src/heap/mod.rs new file mode 100644 index 00000000..177d01b7 --- /dev/null +++ b/zeroize_stack/src/heap/mod.rs @@ -0,0 +1,17 @@ +use psm::psm_stack_manipulation; + +psm_stack_manipulation! { + yes { + #[cfg(not(any(target_family = "wasm", target_os = "hermit")))] + #[path = "alloc.rs"] + mod heap_struct; + + #[cfg(any(target_family = "wasm", target_os = "hermit"))] + #[path = "mmap.rs"] + mod heap_struct; + } + + no { + + } +} \ No newline at end of file diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 3e5870d5..7b4e08f3 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -55,6 +55,9 @@ //! - Secure enclave transitions //! 
- Sanitizing temporary buffers in high-assurance systems +#[cfg(feature = "heap")] +mod heap; + use zeroize::{Zeroize, ZeroizeOnDrop}; extern crate alloc; From a098bf8aec4a1042789cc78f3de533c11fee1923 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 23 Dec 2025 10:12:59 -0600 Subject: [PATCH 16/20] modified stacker code --- zeroize_stack/Cargo.toml | 3 +- zeroize_stack/src/heap/alloc.rs | 52 ++++++-- zeroize_stack/src/heap/mmap.rs | 71 ++++++++-- zeroize_stack/src/heap/mod.rs | 97 +++++++++++++- zeroize_stack/src/lib.rs | 189 +-------------------------- zeroize_stack/tests/zeroize_stack.rs | 27 ++-- 6 files changed, 212 insertions(+), 227 deletions(-) diff --git a/zeroize_stack/Cargo.toml b/zeroize_stack/Cargo.toml index 39951295..5fa9ef71 100644 --- a/zeroize_stack/Cargo.toml +++ b/zeroize_stack/Cargo.toml @@ -16,12 +16,13 @@ edition = "2024" rust-version = "1.85" [dependencies] +libc = "0.2.156" psm = "0.1.26" zeroize = { version = "1.0" } [features] default = ["heap", "stack", "std"] -heap = [] +heap = ["std"] stack = [] std = [] diff --git a/zeroize_stack/src/heap/alloc.rs b/zeroize_stack/src/heap/alloc.rs index d1585347..c8eba653 100644 --- a/zeroize_stack/src/heap/alloc.rs +++ b/zeroize_stack/src/heap/alloc.rs @@ -1,15 +1,39 @@ -use crate::{get_stack_limit, set_stack_limit}; +//! This file contains code derived from the Rust project, +//! originally written by Alex Crichton and licensed under +//! the Apache License, Version 2.0 or the MIT license, at +//! your option. +//! +//! Copyright (c) 2014 Alex Crichton +//! +//! Licensed under the Apache License, Version 2.0 or the MIT license +//! , at your +//! option. This file may not be copied, modified, or distributed +//! except according to those terms. -pub struct StackRestoreGuard { +use core::{ptr, sync::atomic}; + +extern crate std; + +/// A zeroizing heap-based stack. Feed one of these into the `switch_stacks` +/// function. 
+pub struct ZeroizingHeapStack { new_stack: *mut u8, stack_bytes: usize, - old_stack_limit: Option, } -const ALIGNMENT: usize = 16; +const ALIGNMENT: usize = 32; -impl StackRestoreGuard { - pub fn new(stack_bytes: usize) -> StackRestoreGuard { +impl ZeroizingHeapStack { + /// Initializes a new "Zeroizing Heap Stack". To be fed into the `switch_stacks` + /// function, and it can be reused, but it must not be reused while it is in use. + /// The borrow-checker should enforce this. + pub fn new(stack_kb: usize) -> ZeroizingHeapStack { + let stack_bytes = stack_kb * 1024; + assert!( + stack_bytes as isize > 0, + "stack_kb must be positive and must not overflow isize when expanded to number of bytes instead of KB" + ); // On these platforms we do not use stack guards. this is very unfortunate, // but there is not much we can do about it without OS support. // We simply allocate the requested size from the global allocator with a suitable @@ -22,26 +46,32 @@ impl StackRestoreGuard { let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap(); let ptr = unsafe { std::alloc::alloc(layout) }; assert!(!ptr.is_null(), "unable to allocate stack"); - StackRestoreGuard { + ZeroizingHeapStack { new_stack: ptr, stack_bytes, - old_stack_limit: get_stack_limit(), } } - + /// Returns (`start ptr of usable stack`, `size of usable stack`). 
pub fn stack_area(&self) -> (*mut u8, usize) { (self.new_stack, self.stack_bytes) } } -impl Drop for StackRestoreGuard { +impl Drop for ZeroizingHeapStack { fn drop(&mut self) { + let mut ptr = self.new_stack as *mut u128; + for _ in 0..self.stack_bytes / size_of::() { + unsafe { + ptr::write_volatile(ptr, 0); + ptr = ptr.add(1); + } + } + atomic::compiler_fence(atomic::Ordering::SeqCst); unsafe { std::alloc::dealloc( self.new_stack, std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT), ); } - set_stack_limit(self.old_stack_limit); } } \ No newline at end of file diff --git a/zeroize_stack/src/heap/mmap.rs b/zeroize_stack/src/heap/mmap.rs index 50afc151..db4650c9 100644 --- a/zeroize_stack/src/heap/mmap.rs +++ b/zeroize_stack/src/heap/mmap.rs @@ -1,20 +1,43 @@ -use crate::{get_stack_limit, set_stack_limit}; +//! This file contains code derived from the Rust project, +//! originally written by Alex Crichton and licensed under +//! the Apache License, Version 2.0 or the MIT license, at +//! your option. +//! +//! Copyright (c) 2014 Alex Crichton +//! +//! Licensed under the Apache License, Version 2.0 or the MIT license +//! , at your +//! option. This file may not be copied, modified, or distributed +//! except according to those terms. -pub struct StackRestoreGuard { +use core::ptr; + +use zeroize::ZeroizeOnDrop; + +extern crate std; + +/// A zeroizing heap-based stack. Feed one of these into the `switch_stacks` +/// function. +pub struct ZeroizingHeapStack { mapping: *mut u8, size_with_guard: usize, page_size: usize, - old_stack_limit: Option, } -impl StackRestoreGuard { - pub fn new(requested_size: usize) -> StackRestoreGuard { +impl ZeroizingHeapStack { + /// Initializes a new "Zeroizing Heap Stack". To be fed into the `switch_stacks` + /// function, and it can be reused, but it must not be reused while it is in use. + /// The borrow-checker should enforce this. 
+ pub fn new(stack_kb: usize) -> ZeroizingHeapStack { // For maximum portability we want to produce a stack that is aligned to a page and has // a size that’s a multiple of page size. It is natural to use mmap to allocate // these pages. Furthermore, we want to allocate two extras pages for the stack guard. // To achieve that we do our calculations in number of pages and convert to bytes last. let page_size = page_size(); - let requested_pages = requested_size + let requested_pages = stack_kb + .checked_mul(1024) + .expect("unreasonably large stack requested") .checked_add(page_size - 1) .expect("unreasonably large stack requested") / page_size; @@ -25,7 +48,7 @@ impl StackRestoreGuard { unsafe { let new_stack = libc::mmap( - std::ptr::null_mut(), + ptr::null_mut(), size_with_guard, libc::PROT_NONE, libc::MAP_PRIVATE | libc::MAP_ANON, @@ -38,11 +61,10 @@ impl StackRestoreGuard { "mmap failed to allocate stack: {}", std::io::Error::last_os_error() ); - let guard = StackRestoreGuard { + let guard = ZeroizingHeapStack { mapping: new_stack as *mut u8, page_size, size_with_guard, - old_stack_limit: get_stack_limit(), }; // We leave two guard pages without read/write access in our allocation. // There is one guard page below the stack and another above it. @@ -78,28 +100,53 @@ impl StackRestoreGuard { } // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79. + /// Returns (`start ptr of usable stack`, `size of usable stack`). pub fn stack_area(&self) -> (*mut u8, usize) { unsafe { ( self.mapping.add(self.page_size), - self.size_with_guard - self.page_size, + self.size_with_guard - 2 * self.page_size, ) } } } -impl Drop for StackRestoreGuard { +impl Drop for ZeroizingHeapStack { fn drop(&mut self) { + let (mut ptr, size) = self.stack_area(); + for _ in 0..size / size_of::() { + unsafe { + ptr::write_volatile(ptr, 0); + ptr = ptr.add(1); + } + } unsafe { // FIXME: check the error code and decide what to do with it. 
// Perhaps a debug_assertion? libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard); } - set_stack_limit(self.old_stack_limit); } } +impl ZeroizeOnDrop for ZeroizingHeapStack {} + fn page_size() -> usize { // FIXME: consider caching the page size. unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn stack_size() { + for kb in 1..64 { + let stack = ZeroizingHeapStack::new(kb); + assert_eq!( + stack.stack_area().1, + ((kb * 1024 + page_size() - 1) / page_size()) * page_size() + ); + } + } } \ No newline at end of file diff --git a/zeroize_stack/src/heap/mod.rs b/zeroize_stack/src/heap/mod.rs index 177d01b7..411e296e 100644 --- a/zeroize_stack/src/heap/mod.rs +++ b/zeroize_stack/src/heap/mod.rs @@ -1,17 +1,108 @@ +//! Heap-based stack zeroization module. This module uses Rust-Lang's `psm` +//! crate to switch stacks to a stack that is allocated on the heap +//! (`ZeroizingHeapStack`) and then executes a callback function on that +//! stack. You can reuse this stack as many times as you want, and when it is +//! dropped, it will be zeroized. + +use core::panic::UnwindSafe; + use psm::psm_stack_manipulation; +#[cfg(feature = "std")] +extern crate std; + psm_stack_manipulation! { yes { - #[cfg(not(any(target_family = "wasm", target_os = "hermit")))] + #[cfg(any(target_family = "wasm", target_os = "hermit"))] #[path = "alloc.rs"] mod heap_struct; - #[cfg(any(target_family = "wasm", target_os = "hermit"))] + #[cfg(not(any(target_family = "wasm", target_os = "hermit")))] #[path = "mmap.rs"] mod heap_struct; + + pub use heap_struct::ZeroizingHeapStack; + + /// Executes a closure on a provided zeroizing heap-based stack. + /// + /// This function does not clear CPU registers. + /// + /// # Arguments + /// + /// * `zeroizing_heap_stack` - the heap-based stack you plan on using + /// for running the closure. 
`psm` recommends at least `4 KiB` of stack space, + /// but the total size cannot overflow an `isize`. Also, some architectures + /// might consume more memory in the stack, such as SPARC. + /// + /// * `crypto_fn` - the code to run while on the switched stack. + /// + /// ## Panicking + /// + /// This function does not panic, but it can segfault. + /// + /// ## Segfaults + /// + /// This code will cause a segmentation fault if your closure consumes + /// more stack space than what you have allocated. + /// + /// ## Debugging + /// + /// Using `#[inline(never)]` on the closure's function definition(s) could + /// make it easier to debug as the function(s) should then show up in + /// backtraces. + /// + /// # Returns + /// + /// This function returns the returned value from the closure. + /// + /// # Safety + /// + /// * The stack needs to be large enough for `crypto_fn()` to execute + /// without overflowing. + /// + /// * For `nostd`, you should use `panic = 'abort'` to avoid unwinding + /// on the switched stack. Unwinding across stack boundaries could cause + /// undefined behavior. `nostd` code must not unwind or return control + /// flow by any other means. + pub unsafe fn switch_stacks<F, R>(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R + where + F: FnOnce() -> R + UnwindSafe, + { + let (stack_ptr, size) = zeroizing_heap_stack.stack_area(); + unsafe { + let panic = psm::on_stack(stack_ptr, size, move || { + #[cfg(feature = "std")] + { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(crypto_fn)) + } + #[cfg(not(feature = "std"))] + return crypto_fn() + }); + match panic { + Err(p) => std::panic::resume_unwind(p), + Ok(v) => v + } + } + } + } } no { - + pub struct ZeroizingHeapStack; + impl ZeroizingHeapStack { + pub fn new(stack_kb: usize) -> Self { + let _ = stack_kb; + ZeroizingHeapStack + } + } + /// PSM is unavailable on this arch/target. + #[deprecated(note = "PSM is unavailable on this arch/target.
Crypto closures will not run on a zeroizing stack.")] + pub unsafe fn switch_stacks(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R + where + F: FnOnce() -> R + UnwindSafe, + { + let _ = zeroizing_heap_stack; + crypto_fn() + } } } \ No newline at end of file diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 7b4e08f3..461482af 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -48,6 +48,9 @@ //! in the event that your stack-switched stack panics. Panicking on a separate //! stack can cause undefined behavior (UB), but if it can be caught with //! `std::panic::catch_unwind`, that aspect of the safety should be more safe. +//! +//! When using `nostd`, try to ensure that `panic = "abort"` to avoid the +//! unsafety of unwinding across stack boundaries. //! //! ## Use Cases //! @@ -56,188 +59,4 @@ //! - Sanitizing temporary buffers in high-assurance systems #[cfg(feature = "heap")] -mod heap; - -use zeroize::{Zeroize, ZeroizeOnDrop}; - -extern crate alloc; - -use alloc::{vec, vec::Vec}; - -#[cfg(feature = "std")] -extern crate std; -#[cfg(feature = "std")] -use core::any::Any; -#[cfg(feature = "std")] -use std::{boxed::Box, panic::catch_unwind}; -#[cfg(feature = "std")] -type StackSwitchResult = Result>; -#[cfg(not(feature = "std"))] -type StackSwitchResult = T; - -use core::panic::{AssertUnwindSafe, UnwindSafe}; - -/// An aligned HeapStack. Aligned to the alignment of a `u128`, and aligned using -/// safe code instead of manual alloc calls. This implements `ZeroizeOnDrop` and -/// contains a lock flag to prevent the stack from being reused while it is being -/// used. -pub struct AlignedHeapStack { - locked: bool, - stack: Vec, -} - -impl AlignedHeapStack { - /// Creates a new `AlignedHeapStack`. `psm` recommends using at least `4 KB` - /// of stack space. - /// - /// # Panics - /// - /// This function panics when `size_kb * 1024` overflows `isize`. 
- pub fn new(size_kb: usize) -> Self { - assert!( - size_kb as isize * 1024 > 0, - "size_kb must be positive and must not overflow isize when expanded to number of bytes instead of kb" - ); - let result = Self { - locked: false, - stack: create_aligned_vec(size_kb, align_of::()), - }; - // these may be redundant but I just want to be sure that the alignment doesn't - // change somehow - debug_assert_eq!(result.stack.as_ptr() as usize % align_of::(), 0); - debug_assert_eq!(result.stack.len() % align_of::(), 0); - result - } - - fn is_locked(&self) -> bool { - self.locked - } - - fn set_lock(&mut self, locked: bool) { - self.locked = locked; - } -} - -impl Drop for AlignedHeapStack { - fn drop(&mut self) { - self.stack.zeroize(); - } -} - -impl ZeroizeOnDrop for AlignedHeapStack {} - -psm::psm_stack_manipulation! { - yes { - /// Executes a function/closure and clears the function's stack by using - /// preallocated space on the heap as the function's stack, and then zeroing - /// that allocated space once the code has ran. - /// - /// This function does not clear the CPU registers. - /// - /// # Arguments - /// - /// * `aligned_heap_stack` - the heap-based aligned region of memory to - /// be used as the stack. `psm` recommends at least `4 KB` of stack - /// space, but the total size cannot overflow an `isize`. Also, - /// some architectures might consume more memory in the stack, such as - /// SPARC. - /// * `crypto_fn` - the code to run while on the separate stack. - /// - /// ## Panicking - /// - /// This function panics when `psm` detects that `on_stack` is unavailable. - /// - /// ## Errors - /// - /// With the `std` feature enabled, this function will result in an error when - /// the closure panics. You may want to log these errors securely, privately, - /// as cryptography panics could be a little revealing if displayed to - /// the end user. 
- /// - /// ## Debugging - /// - /// Using `#[inline(never)]` on the closure's function definition(s) could - /// make it easier to debug as the function(s) should show up in backtraces. - /// - /// # Returns - /// - /// * If `std` is enabled, this returns a `Result>` - /// * Otherwise, this returns `R` directly; no panics are caught. - /// - /// # Safety - /// - /// * The stack needs to be large enough for `crypto_fn()` to execute without - /// overflow. - /// * `nostd` only: `crypto_fn()` must not unwind or return control flow by any other means - /// than by directly returning. - pub unsafe fn exec_on_sanitized_stack(aligned_heap_stack: &mut AlignedHeapStack, crypto_fn: F) -> StackSwitchResult - where - F: FnOnce() -> R + UnwindSafe, - { - assert!(!aligned_heap_stack.is_locked(), "AlignedHeapStack was locked. You must not use it while it is being used already!"); - aligned_heap_stack.set_lock(true); - let res = unsafe { - psm::on_stack(aligned_heap_stack.stack.as_mut_ptr(), aligned_heap_stack.stack.len(), || { - #[cfg(not(feature = "std"))] - { - crypto_fn() - } - #[cfg(feature = "std")] - { - catch_unwind(AssertUnwindSafe(crypto_fn)) - } - }) - }; - aligned_heap_stack.set_lock(false); - res - } - } - no { - pub unsafe fn exec_on_sanitized_stack(_stack_size_kb: isize, _crypto_fn: F) -> StackSwitchResult - where - F: FnOnce() -> R, - { - panic!("Stack manipulation not possible on this platform") - } - } -} - -/// Round up to the nearest multiple of alignment -const fn align_up(value: usize, alignment: usize) -> usize { - (value + alignment - 1) & !(alignment - 1) -} - -/// Creates an aligned Vec with the specified size in KB and alignment. -/// -/// This helps ensure that the safety requirements are met when using -/// `fn secure_crypto_call_heap()`. -/// -/// Both the data pointer and length will be aligned to the specified boundary. 
-fn create_aligned_vec(size_kb: usize, alignment: usize) -> Vec { - let size_bytes = size_kb * 1024; - // checking one of the safety conditions of `psm::on_stack()` - assert!(size_bytes <= isize::MAX as usize); - - let aligned_size = align_up(size_bytes, alignment); - - // Allocate extra space to ensure we can find an aligned region - let mut vec = vec![0u8; aligned_size + alignment]; - - // Find the aligned position within the vec - let ptr_addr = vec.as_ptr() as usize; - let aligned_addr = align_up(ptr_addr, alignment); - let offset = aligned_addr - ptr_addr; - - // Remove elements from the beginning to align the start - vec.drain(0..offset); - - // Truncate to the exact aligned size we want - vec.truncate(aligned_size); - - // Verify alignment (these will be optimized out in release builds) - debug_assert_eq!(vec.as_ptr() as usize % alignment, 0); - debug_assert_eq!(vec.len() % alignment, 0); - debug_assert_eq!(vec.len(), aligned_size); - - vec -} +pub mod heap; \ No newline at end of file diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs index d616db2d..f0c3cc01 100644 --- a/zeroize_stack/tests/zeroize_stack.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -3,7 +3,7 @@ mod stack_sanitization_tests { use std::panic::AssertUnwindSafe; - use zeroize_stack::{AlignedHeapStack, exec_on_sanitized_stack}; + use zeroize_stack::heap::{ZeroizingHeapStack, switch_stacks}; #[inline(never)] fn dummy_fn() -> (*const u8, u64) { @@ -14,9 +14,9 @@ mod stack_sanitization_tests { #[test] fn stack_sanitization_v2() { - let mut heap_stack = AlignedHeapStack::new(4); - let result = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; - assert_eq!(result.unwrap().1, 12345); + let mut heap_stack = ZeroizingHeapStack::new(4); + let result = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + assert_eq!(result.1, 12345); // results in segmentation fault, which is somewhat normal... 
just wanted // to try it // assert_eq!(unsafe {*result.0}, 42); @@ -24,13 +24,11 @@ mod stack_sanitization_tests { #[test] fn allow_stack_reuse_between_calls() { - let mut heap_stack = AlignedHeapStack::new(4); - let result_1 = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; - assert!(result_1.is_ok()); - assert_eq!(result_1.unwrap().1, 12345); - let result_2 = unsafe { exec_on_sanitized_stack(&mut heap_stack, || dummy_fn()) }; - assert!(result_2.is_ok()); - assert_eq!(result_2.unwrap().1, 12345); + let mut heap_stack = ZeroizingHeapStack::new(4); + let result_1 = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + assert_eq!(result_1.1, 12345); + let result_2 = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + assert_eq!(result_2.1, 12345); } fn non_returning_function(v: &mut u32) { @@ -38,15 +36,14 @@ mod stack_sanitization_tests { } #[test] fn non_returning_function_test() { - let mut heap_stack = AlignedHeapStack::new(4); + let mut heap_stack = ZeroizingHeapStack::new(4); let mut v = 0; unsafe { - exec_on_sanitized_stack( + switch_stacks( &mut heap_stack, AssertUnwindSafe(|| non_returning_function(&mut v)), ) - } - .unwrap(); + }; assert_eq!(v, 5); } } From 9eaf91fd1c98f2089ed391c7838b278bcedcd392 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 23 Dec 2025 10:24:32 -0600 Subject: [PATCH 17/20] fmt --- zeroize_stack/src/heap/mod.rs | 66 +++++++++++++++++------------------ zeroize_stack/src/lib.rs | 6 ++-- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/zeroize_stack/src/heap/mod.rs b/zeroize_stack/src/heap/mod.rs index 411e296e..5a40c733 100644 --- a/zeroize_stack/src/heap/mod.rs +++ b/zeroize_stack/src/heap/mod.rs @@ -1,7 +1,7 @@ -//! Heap-based stack zeroization module. This module uses Rust-Lang's `psm` -//! crate to switch stacks to a stack that is allocated on the heap -//! (`ZeroizingHeapStack`) and then executes a callback function on that -//! stack. 
You can reuse this stack as many times as you want, and when it is +//! Heap-based stack zeroization module. This module uses Rust-Lang's `psm` +//! crate to switch stacks to a stack that is allocated on the heap +//! (`ZeroizingHeapStack`) and then executes a callback function on that +//! stack. You can reuse this stack as many times as you want, and when it is //! dropped, it will be zeroized. use core::panic::UnwindSafe; @@ -20,52 +20,52 @@ psm_stack_manipulation! { #[cfg(not(any(target_family = "wasm", target_os = "hermit")))] #[path = "mmap.rs"] mod heap_struct; - + pub use heap_struct::ZeroizingHeapStack; /// Executes a closure on a provided zeroizing heap-based stack. - /// + /// /// This function does not clear CPU registers. - /// + /// /// # Arguments - /// - /// * `zeroizing_heap_stack` - the heap-based stack you plan on using - /// for running the closure. `psm` recommends at least `4 KiB` of stack space, - /// but the total size cannot overflow an `isize`. Also, some architectures + /// + /// * `zeroizing_heap_stack` - the heap-based stack you plan on using + /// for running the closure. `psm` recommends at least `4 KiB` of stack space, + /// but the total size cannot overflow an `isize`. Also, some architectures /// might consume more memory in the stack, such as SPARC. - /// + /// /// * `crypto_fn` - the code to run while on the switched stack. - /// + /// /// ## Panicking - /// + /// /// This function does not panic, but it can segfault. - /// + /// /// ## Segfaults - /// - /// This code will cause a segmentation fault if your closure consumes + /// + /// This code will cause a segmentation fault if your closure consumes /// more stack space than what you have allocated. 
- /// + /// /// ## Debugging - /// - /// Using `#[inline(never)]` on the closure's function definition(s) could - /// make it easier to debug as the function(s) should then show up in + /// + /// Using `#[inline(never)]` on the closure's function definition(s) could + /// make it easier to debug as the function(s) should then show up in /// backtraces. - /// + /// /// # Returns - /// + /// /// This function returns the returned value from the closure. - /// + /// /// # Safety - /// - /// * The stack needs to be large enough for `crypto_fn()` to execute + /// + /// * The stack needs to be large enough for `crypto_fn()` to execute /// without overflowing. - /// - /// * For `nostd`, you should use `panic = 'abort'` to avoid unwinding - /// on the switched stack. Unwinding across stack boundaries could cause - /// undefined behavior. `nostd` code must not unwind or return control + /// + /// * For `nostd`, you should use `panic = 'abort'` to avoid unwinding + /// on the switched stack. Unwinding across stack boundaries could cause + /// undefined behavior. `nostd` code must not unwind or return control /// flow by any other means. pub unsafe fn switch_stacks(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R - where + where F: FnOnce() -> R + UnwindSafe, { let (stack_ptr, size) = zeroizing_heap_stack.stack_area(); @@ -98,11 +98,11 @@ psm_stack_manipulation! { /// PSM is unavailable on this arch/target. #[deprecated(note = "PSM is unavailable on this arch/target. Crypto closures will not run on a zeroizing stack.")] pub unsafe fn switch_stacks(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R - where + where F: FnOnce() -> R + UnwindSafe, { let _ = zeroizing_heap_stack; crypto_fn() } } -} \ No newline at end of file +} diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs index 461482af..a1c59ab7 100644 --- a/zeroize_stack/src/lib.rs +++ b/zeroize_stack/src/lib.rs @@ -48,8 +48,8 @@ //! 
in the event that your stack-switched stack panics. Panicking on a separate //! stack can cause undefined behavior (UB), but if it can be caught with //! `std::panic::catch_unwind`, that aspect of the safety should be more safe. -//! -//! When using `nostd`, try to ensure that `panic = "abort"` to avoid the +//! +//! When using `nostd`, try to ensure that `panic = "abort"` to avoid the //! unsafety of unwinding across stack boundaries. //! //! ## Use Cases @@ -59,4 +59,4 @@ //! - Sanitizing temporary buffers in high-assurance systems #[cfg(feature = "heap")] -pub mod heap; \ No newline at end of file +pub mod heap; From 0661279561dcbd717dc428635db72171da1725c8 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 23 Dec 2025 10:26:16 -0600 Subject: [PATCH 18/20] clippy --- zeroize_stack/tests/zeroize_stack.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs index f0c3cc01..8671f4f5 100644 --- a/zeroize_stack/tests/zeroize_stack.rs +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -15,7 +15,7 @@ mod stack_sanitization_tests { #[test] fn stack_sanitization_v2() { let mut heap_stack = ZeroizingHeapStack::new(4); - let result = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + let result = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; assert_eq!(result.1, 12345); // results in segmentation fault, which is somewhat normal... 
just wanted // to try it @@ -25,9 +25,9 @@ mod stack_sanitization_tests { #[test] fn allow_stack_reuse_between_calls() { let mut heap_stack = ZeroizingHeapStack::new(4); - let result_1 = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + let result_1 = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; assert_eq!(result_1.1, 12345); - let result_2 = unsafe { switch_stacks(&mut heap_stack, || dummy_fn()) }; + let result_2 = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; assert_eq!(result_2.1, 12345); } From 1bb3800ac6847d4bfb9054cb870d70bb6ba75190 Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 23 Dec 2025 10:27:21 -0600 Subject: [PATCH 19/20] audit fix? --- Cargo.lock | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c545c412..1f347605 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -288,6 +288,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "subtle" version = "2.6.1" @@ -354,6 +360,7 @@ dependencies = [ name = "zeroize_stack" version = "0.1.0" dependencies = [ + "libc", "psm", - "zeroize 1.8.1", + "zeroize 1.8.2", ] From afbbb0e8678e63dd0f75dd6f026fc693cf7fc52b Mon Sep 17 00:00:00 2001 From: Noah Stiltner Date: Tue, 23 Dec 2025 10:30:32 -0600 Subject: [PATCH 20/20] clippy again --- zeroize_stack/src/heap/mmap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zeroize_stack/src/heap/mmap.rs b/zeroize_stack/src/heap/mmap.rs index db4650c9..9dc4ff02 100644 --- a/zeroize_stack/src/heap/mmap.rs +++ b/zeroize_stack/src/heap/mmap.rs @@ -145,7 +145,7 @@ mod tests { let stack = ZeroizingHeapStack::new(kb); assert_eq!( stack.stack_area().1, - ((kb * 1024 + page_size() - 1) / page_size()) * page_size() + (kb * 1024).div_ceil(page_size()) * page_size() ); } }