diff --git a/Cargo.lock b/Cargo.lock index 10f2e1c4..1f347605 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,6 +31,16 @@ dependencies = [ "hybrid-array", ] +[[package]] +name = "cc" +version = "1.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65193589c6404eb80b450d618eaf9a2cafaaafd57ecce47370519ef674a7bd44" +dependencies = [ + "find-msvc-tools", + "shlex", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -111,6 +121,12 @@ dependencies = [ "sha3", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" + [[package]] name = "hex" version = "0.4.3" @@ -184,6 +200,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "psm" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" +dependencies = [ + "cc", +] + [[package]] name = "quote" version = "1.0.42" @@ -263,6 +288,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "subtle" version = "2.6.1" @@ -324,3 +355,12 @@ dependencies = [ "quote", "syn", ] + +[[package]] +name = "zeroize_stack" +version = "0.1.0" +dependencies = [ + "libc", + "psm", + "zeroize 1.8.2", +] diff --git a/Cargo.toml b/Cargo.toml index 12d085e6..533810bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,8 @@ members = [ "opaque-debug", "wycheproof2blb", "zeroize", - "zeroize_derive" + "zeroize_derive", + "zeroize_stack" ] exclude = ["aarch64-dit"] diff --git a/zeroize_stack/Cargo.toml b/zeroize_stack/Cargo.toml new file mode 100644 index 00000000..5fa9ef71 --- /dev/null +++ b/zeroize_stack/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = 
"zeroize_stack"
+version = "0.1.0"
+description = """
+Securely zeroize the stack with a simple function built on
+the Portable Stack Manipulation (psm) crate and zeroize crate.
+"""
+authors = ["The RustCrypto Project Developers"]
+license = "Apache-2.0 OR MIT"
+homepage = "https://github.com/RustCrypto/utils/tree/master/zeroize_stack"
+repository = "https://github.com/RustCrypto/utils"
+readme = "README.md"
+categories = ["cryptography", "memory-management", "no-std", "os"]
+keywords = ["memory", "memset", "secure", "volatile", "zero", "stack"]
+edition = "2024"
+rust-version = "1.85"
+
+[dependencies]
+libc = "0.2.156"
+psm = "0.1.26"
+zeroize = { version = "1.0" }
+
+[features]
+default = ["heap", "stack", "std"]
+heap = ["std"]
+stack = []
+std = []
+
+[package.metadata.docs.rs]
+all-features = true
diff --git a/zeroize_stack/README.md b/zeroize_stack/README.md
new file mode 100644
index 00000000..0f4e0f7e
--- /dev/null
+++ b/zeroize_stack/README.md
@@ -0,0 +1,65 @@
+# [RustCrypto]: zeroize_stack
+
+[![Crate][crate-image]][crate-link]
+[![Docs][docs-image]][docs-link]
+![Apache 2.0/MIT Licensed][license-image]
+![MSRV][rustc-image]
+[![Build Status][build-image]][build-link]
+
+Securely zero the stack (a.k.a. [zeroize]) while avoiding compiler optimizations.
+
+This crate implements a portable approach to securely zeroing the stack using
+techniques which guarantee they won't be "optimized away" by the compiler.
+
+[Documentation]
+
+## About
+
+[Zeroing memory securely is hard] - compilers optimize for performance, and
+in doing so they love to "optimize away" unnecessary zeroing calls, as well
+as make extra copies of data on the stack that cannot be easily zeroed. That's
+what this crate is for.
+
+This crate isn't about tricks: it uses [psm::on_stack] to run a function on
+a portable stack, and then uses [zeroize] to zero that stack. 
`psm` implements
+all of the assembly for several different architectures, and the [zeroize]
+portion of the task was implemented in pure Rust.
+
+- `#![no_std]` i.e. **embedded-friendly**! (`alloc` is required)
+- No functionality besides securely zeroing a function's stack usage!
+
+## License
+
+Licensed under either of:
+
+* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+* [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+
+[//]: # (badges)
+
+[crate-image]: https://img.shields.io/crates/v/zeroize_stack.svg
+[crate-link]: https://crates.io/crates/zeroize_stack
+[docs-image]: https://docs.rs/zeroize_stack/badge.svg
+[docs-link]: https://docs.rs/zeroize_stack/
+[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
+[rustc-image]: https://img.shields.io/badge/rustc-1.85+-blue.svg
+[build-image]: https://github.com/RustCrypto/utils/actions/workflows/zeroize.yml/badge.svg?branch=master
+[build-link]: https://github.com/RustCrypto/utils/actions/workflows/zeroize.yml?query=branch:master
+
+[//]: # (general links)
+
+[RustCrypto]: https://github.com/RustCrypto
+[zeroize]: https://en.wikipedia.org/wiki/Zeroisation
+[`Zeroize` trait]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html
+[Documentation]: https://docs.rs/zeroize_stack/
+[Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
+[psm::on_stack]: https://docs.rs/psm/latest/psm/fn.on_stack.html
+[good cryptographic hygiene]: https://github.com/veorq/cryptocoding#clean-memory-of-secret-data
diff --git a/zeroize_stack/TODO.md b/zeroize_stack/TODO.md
new file mode 100644
index 00000000..52290f25
--- /dev/null
+++ b/zeroize_stack/TODO.md
@@ -0,0 +1,47 @@
+# TODO:
+
+## 
Likely impossible and/or unsafe + +* Add support for async closures, possibly using a macro to define the functions if necessary. Use `futures::executor::block_on(f())` to poll the entire future completion inside the stack switched context, and avoid `.await` that yields control outside of the `on_stack()` boundary. Something like: + +```rust +pub unsafe fn exec_async_on_sanitized_stack( + stack: &mut [u8], + f: F, +) -> Result> +where + F: FnOnce() -> Fut + UnwindSafe, + Fut: Future, +{ + let mut result = None; + + on_stack(stack, || { + result = Some(catch_unwind(AssertUnwindSafe(|| { + // Block on the future inside the heap stack + futures::executor::block_on(f()) + }))); + }); + + result.expect("Closure did not run") +} +``` + +Copilot provided that code, but Gemini says that after the future is awaited, there will be no way for the program to know which stack to return to. Also, there is an open issue regarding async closures in `stacker` that has not been resolved after 7 months. https://github.com/rust-lang/stacker/issues/111 + +## Safe + +* Handle unwinds better: currently we return a `Result>`. The error case is a little bit tricky to handle, as dropping the error could cause a panic. The program should either panic, or return the panic payload's message. + +* Either: + * Panic when the OS is `hermit` or it is running on `wasm32` or `wasm64`, as their stacks don't behave the same as all of the others. + * Run the closure without `psm::on_stack` and generate a compiler warning stating that the target's stack layout is not supported with basic stack switching. + * Implement different types of `AlignedHeapStack` to cover `wasm32` and `hermit` as performed in the `stacker` crate. + +## Would require a PR to `stacker` to zero the allocated stack on drop + +* Use stacker crate to handle stack size management: if I read some of the `stacker` docs correctly, that crate should be able to extend the size of the stack when it is about to overflow. 
If that is correct, we could use their techniques to allocate a new stack and zeroize the old one whenever our allocated stack is about to overflow, eliminating the primary remaining `# Safety` comment. Note: we may not be able to zeroize the old stack immediately as the stack switching process likely attempts to return to the old stack once execution completes; we might have to wait until execution completes before zeroizing all heap-stacks. + +## Requires `asm!` + +* Add an `asm!` alternative method for stack bleaching. In theory, it would be better to use `asm!` as we would not need to worry about the size of the allocated switched stack, and it would keep all of the code running on the actual stack and not the heap, possibly preserving performance. The problem with this is that using pointers from `asm!` and rust code to zero the space between the pointers results in segmentation faults on `x86_64`. + * when testing this, assert that the two pointers are not equal to each other and not null. \ No newline at end of file diff --git a/zeroize_stack/src/heap/alloc.rs b/zeroize_stack/src/heap/alloc.rs new file mode 100644 index 00000000..c8eba653 --- /dev/null +++ b/zeroize_stack/src/heap/alloc.rs @@ -0,0 +1,77 @@ +//! This file contains code derived from the Rust project, +//! originally written by Alex Crichton and licensed under +//! the Apache License, Version 2.0 or the MIT license, at +//! your option. +//! +//! Copyright (c) 2014 Alex Crichton +//! +//! Licensed under the Apache License, Version 2.0 or the MIT license +//! , at your +//! option. This file may not be copied, modified, or distributed +//! except according to those terms. + +use core::{ptr, sync::atomic}; + +extern crate std; + +/// A zeroizing heap-based stack. Feed one of these into the `switch_stacks` +/// function. 
+pub struct ZeroizingHeapStack { + new_stack: *mut u8, + stack_bytes: usize, +} + +const ALIGNMENT: usize = 32; + +impl ZeroizingHeapStack { + /// Initializes a new "Zeroizing Heap Stack". To be fed into the `switch_stacks` + /// function, and it can be reused, but it must not be reused while it is in use. + /// The borrow-checker should enforce this. + pub fn new(stack_kb: usize) -> ZeroizingHeapStack { + let stack_bytes = stack_kb * 1024; + assert!( + stack_bytes as isize > 0, + "stack_kb must be positive and must not overflow isize when expanded to number of bytes instead of KB" + ); + // On these platforms we do not use stack guards. this is very unfortunate, + // but there is not much we can do about it without OS support. + // We simply allocate the requested size from the global allocator with a suitable + // alignment. + let stack_bytes = stack_bytes + .checked_add(ALIGNMENT - 1) + .expect("unreasonably large stack requested") + / ALIGNMENT + * ALIGNMENT; + let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap(); + let ptr = unsafe { std::alloc::alloc(layout) }; + assert!(!ptr.is_null(), "unable to allocate stack"); + ZeroizingHeapStack { + new_stack: ptr, + stack_bytes, + } + } + /// Returns (`start ptr of usable stack`, `size of usable stack`). 
+    pub fn stack_area(&self) -> (*mut u8, usize) {
+        (self.new_stack, self.stack_bytes)
+    }
+}
+
+impl Drop for ZeroizingHeapStack {
+    fn drop(&mut self) {
+        // Zeroize the stack in 16-byte volatile chunks; `stack_bytes` was
+        // rounded up in `new` to a multiple of ALIGNMENT (32), so it divides
+        // evenly by `u128`'s size. Volatile writes cannot be elided by the
+        // optimizer.
+        let mut ptr = self.new_stack as *mut u128;
+        for _ in 0..self.stack_bytes / size_of::<u128>() {
+            unsafe {
+                ptr::write_volatile(ptr, 0);
+                ptr = ptr.add(1);
+            }
+        }
+        // Keep the zeroing ordered before the deallocation.
+        atomic::compiler_fence(atomic::Ordering::SeqCst);
+        unsafe {
+            std::alloc::dealloc(
+                self.new_stack,
+                std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT),
+            );
+        }
+    }
+}
+
+// Zeroization is guaranteed by the `Drop` impl above; advertise it, matching
+// the mmap-backed implementation of this type.
+impl zeroize::ZeroizeOnDrop for ZeroizingHeapStack {}
\ No newline at end of file
diff --git a/zeroize_stack/src/heap/mmap.rs b/zeroize_stack/src/heap/mmap.rs
new file mode 100644
index 00000000..9dc4ff02
--- /dev/null
+++ b/zeroize_stack/src/heap/mmap.rs
@@ -0,0 +1,152 @@
+//! This file contains code derived from the Rust project,
+//! originally written by Alex Crichton and licensed under
+//! the Apache License, Version 2.0 or the MIT license, at
+//! your option.
+//!
+//! Copyright (c) 2014 Alex Crichton
+//!
+//! Licensed under the Apache License, Version 2.0
+//! <https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+//! <https://opensource.org/licenses/MIT>, at your
+//! option. This file may not be copied, modified, or distributed
+//! except according to those terms.
+
+use core::ptr;
+
+use zeroize::ZeroizeOnDrop;
+
+extern crate std;
+
+/// A zeroizing heap-based stack. Feed one of these into the `switch_stacks`
+/// function.
+pub struct ZeroizingHeapStack {
+    mapping: *mut u8,
+    size_with_guard: usize,
+    page_size: usize,
+}
+
+impl ZeroizingHeapStack {
+    /// Initializes a new "Zeroizing Heap Stack". To be fed into the `switch_stacks`
+    /// function, and it can be reused, but it must not be reused while it is in use.
+    /// The borrow-checker should enforce this.
+    pub fn new(stack_kb: usize) -> ZeroizingHeapStack {
+        // For maximum portability we want to produce a stack that is aligned to a page and has
+        // a size that’s a multiple of page size. It is natural to use mmap to allocate
+        // these pages. Furthermore, we want to allocate two extra pages for the stack guard.
+ // To achieve that we do our calculations in number of pages and convert to bytes last. + let page_size = page_size(); + let requested_pages = stack_kb + .checked_mul(1024) + .expect("unreasonably large stack requested") + .checked_add(page_size - 1) + .expect("unreasonably large stack requested") + / page_size; + let page_count_with_guard = std::cmp::max(1, requested_pages) + 2; + let size_with_guard = page_count_with_guard + .checked_mul(page_size) + .expect("unreasonably large stack requested"); + + unsafe { + let new_stack = libc::mmap( + ptr::null_mut(), + size_with_guard, + libc::PROT_NONE, + libc::MAP_PRIVATE | libc::MAP_ANON, + -1, // Some implementations assert fd = -1 if MAP_ANON is specified + 0, + ); + assert_ne!( + new_stack, + libc::MAP_FAILED, + "mmap failed to allocate stack: {}", + std::io::Error::last_os_error() + ); + let guard = ZeroizingHeapStack { + mapping: new_stack as *mut u8, + page_size, + size_with_guard, + }; + // We leave two guard pages without read/write access in our allocation. + // There is one guard page below the stack and another above it. + let above_guard_page = new_stack.add(page_size); + #[cfg(not(target_os = "openbsd"))] + let result = libc::mprotect( + above_guard_page, + size_with_guard - 2 * page_size, + libc::PROT_READ | libc::PROT_WRITE, + ); + #[cfg(target_os = "openbsd")] + let result = if libc::mmap( + above_guard_page, + size_with_guard - 2 * page_size, + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK, + -1, + 0, + ) == above_guard_page + { + 0 + } else { + -1 + }; + assert_ne!( + result, + -1, + "mprotect/mmap failed: {}", + std::io::Error::last_os_error() + ); + guard + } + } + + // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79. + /// Returns (`start ptr of usable stack`, `size of usable stack`). 
+    pub fn stack_area(&self) -> (*mut u8, usize) {
+        unsafe {
+            (
+                self.mapping.add(self.page_size),
+                self.size_with_guard - 2 * self.page_size,
+            )
+        }
+    }
+}
+
+impl Drop for ZeroizingHeapStack {
+    fn drop(&mut self) {
+        // Zeroize the usable stack area (guard pages excluded — they are
+        // PROT_NONE and untouchable) with volatile writes the optimizer
+        // cannot elide.
+        let (mut ptr, size) = self.stack_area();
+        for _ in 0..size / size_of::<u8>() {
+            unsafe {
+                ptr::write_volatile(ptr, 0);
+                ptr = ptr.add(1);
+            }
+        }
+        // Match alloc.rs: keep the volatile zeroing ordered before the unmap.
+        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
+        unsafe {
+            // FIXME: check the error code and decide what to do with it.
+            // Perhaps a debug_assertion?
+            libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard);
+        }
+    }
+}
+
+impl ZeroizeOnDrop for ZeroizingHeapStack {}
+
+fn page_size() -> usize {
+    // FIXME: consider caching the page size.
+    unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn stack_size() {
+        for kb in 1..64 {
+            let stack = ZeroizingHeapStack::new(kb);
+            assert_eq!(
+                stack.stack_area().1,
+                (kb * 1024).div_ceil(page_size()) * page_size()
+            );
+        }
+    }
+}
\ No newline at end of file
diff --git a/zeroize_stack/src/heap/mod.rs b/zeroize_stack/src/heap/mod.rs
new file mode 100644
index 00000000..5a40c733
--- /dev/null
+++ b/zeroize_stack/src/heap/mod.rs
@@ -0,0 +1,108 @@
+//! Heap-based stack zeroization module. This module uses Rust-Lang's `psm`
+//! crate to switch stacks to a stack that is allocated on the heap
+//! (`ZeroizingHeapStack`) and then executes a callback function on that
+//! stack. You can reuse this stack as many times as you want, and when it is
+//! dropped, it will be zeroized.
+
+use core::panic::UnwindSafe;
+
+use psm::psm_stack_manipulation;
+
+#[cfg(feature = "std")]
+extern crate std;
+
+psm_stack_manipulation! 
{ + yes { + #[cfg(any(target_family = "wasm", target_os = "hermit"))] + #[path = "alloc.rs"] + mod heap_struct; + + #[cfg(not(any(target_family = "wasm", target_os = "hermit")))] + #[path = "mmap.rs"] + mod heap_struct; + + pub use heap_struct::ZeroizingHeapStack; + + /// Executes a closure on a provided zeroizing heap-based stack. + /// + /// This function does not clear CPU registers. + /// + /// # Arguments + /// + /// * `zeroizing_heap_stack` - the heap-based stack you plan on using + /// for running the closure. `psm` recommends at least `4 KiB` of stack space, + /// but the total size cannot overflow an `isize`. Also, some architectures + /// might consume more memory in the stack, such as SPARC. + /// + /// * `crypto_fn` - the code to run while on the switched stack. + /// + /// ## Panicking + /// + /// This function does not panic, but it can segfault. + /// + /// ## Segfaults + /// + /// This code will cause a segmentation fault if your closure consumes + /// more stack space than what you have allocated. + /// + /// ## Debugging + /// + /// Using `#[inline(never)]` on the closure's function definition(s) could + /// make it easier to debug as the function(s) should then show up in + /// backtraces. + /// + /// # Returns + /// + /// This function returns the returned value from the closure. + /// + /// # Safety + /// + /// * The stack needs to be large enough for `crypto_fn()` to execute + /// without overflowing. + /// + /// * For `nostd`, you should use `panic = 'abort'` to avoid unwinding + /// on the switched stack. Unwinding across stack boundaries could cause + /// undefined behavior. `nostd` code must not unwind or return control + /// flow by any other means. 
+        pub unsafe fn switch_stacks<F, R>(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R
+        where
+            F: FnOnce() -> R + UnwindSafe,
+        {
+            let (stack_ptr, size) = zeroizing_heap_stack.stack_area();
+            unsafe {
+                // With `std`, catch any panic while still on the switched
+                // stack and resume the unwind on the caller's stack:
+                // unwinding across the `psm` stack boundary is undefined
+                // behavior.
+                #[cfg(feature = "std")]
+                {
+                    let caught = psm::on_stack(stack_ptr, size, move || {
+                        std::panic::catch_unwind(std::panic::AssertUnwindSafe(crypto_fn))
+                    });
+                    match caught {
+                        Err(p) => std::panic::resume_unwind(p),
+                        Ok(v) => v,
+                    }
+                }
+                // Without `std` there is no `catch_unwind`; the caller must
+                // guarantee `crypto_fn` cannot unwind (e.g. build with
+                // `panic = "abort"`).
+                #[cfg(not(feature = "std"))]
+                {
+                    psm::on_stack(stack_ptr, size, crypto_fn)
+                }
+            }
+        }
+    }
+
+    no {
+        /// Fallback stack handle for targets where `psm` cannot switch
+        /// stacks; it allocates nothing and `switch_stacks` runs the closure
+        /// on the caller's own (non-zeroized) stack.
+        pub struct ZeroizingHeapStack;
+        impl ZeroizingHeapStack {
+            /// Creates the no-op stack handle; `stack_kb` is ignored.
+            pub fn new(stack_kb: usize) -> Self {
+                let _ = stack_kb;
+                ZeroizingHeapStack
+            }
+        }
+        /// PSM is unavailable on this arch/target.
+        #[deprecated(note = "PSM is unavailable on this arch/target. Crypto closures will not run on a zeroizing stack.")]
+        pub unsafe fn switch_stacks<F, R>(zeroizing_heap_stack: &mut ZeroizingHeapStack, crypto_fn: F) -> R
+        where
+            F: FnOnce() -> R + UnwindSafe,
+        {
+            let _ = zeroizing_heap_stack;
+            crypto_fn()
+        }
+    }
+}
diff --git a/zeroize_stack/src/lib.rs b/zeroize_stack/src/lib.rs
new file mode 100644
index 00000000..a1c59ab7
--- /dev/null
+++ b/zeroize_stack/src/lib.rs
@@ -0,0 +1,62 @@
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
+    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
+)]
+#![warn(missing_docs, unused_qualifications)]
+
+//! # zeroize_stack
+//!
+//! A crate for sanitizing stack memory after sensitive operations—sometimes referred to as _Stack Bleaching_.
+//!
+//! Modern compilers and CPUs routinely copy, spill, and rearrange data during execution. Even if sensitive values are scoped to a function, they may:
+//! - Be duplicated across multiple stack frames
+//! - Be spilled from registers to the stack during register pressure
+//! 
- Persist in memory long after the function returns
+//!
+//! This crate provides tools to explicitly zeroize stack regions used during
+//! cryptographic or sensitive computations, helping mitigate:
+//! - Leakage through stack inspection or memory dumps
+//! - Residual data from compiler-inserted spills
+//! - ABI-visible register reuse across function boundaries
+//!
+//! ## Why Stack Sanitization Matters
+//!
+//! Unlike heap memory, stack allocations are ephemeral and compiler-controlled.
+//! Sensitive data may be:
+//! - Copied implicitly by the optimizer
+//! - Stored temporarily during register allocation
+//! - Left behind in stack frames even after function return
+//!
+//! This crate offers abstractions for:
+//! - Executing functions on isolated, aligned stack buffers
+//! - Zeroizing stack memory after execution
+//!
+//! ## Safety
+//!
+//! These operations involve low-level stack manipulation and unsafe code. The
+//! caller must ensure:
+//! - The stack size provided is large enough for the closure to run within.
+//! - The closure does not unwind or return control flow by any means other than
+//!   directly returning. `std` users do not need to worry about this due to
+//!   the existence of `catch_unwind`.
+//!
+//! ## `no_std` Support
+//!
+//! This crate is compatible with `no_std` environments, but it is less safe
+//! if code running on the switched stack panics. Panicking on a separate
+//! stack can cause undefined behavior (UB); with `std` enabled, panics are
+//! caught via `std::panic::catch_unwind` before they can unwind across the
+//! stack boundary, which removes that hazard.
+//!
+//! When using `no_std`, build with `panic = "abort"` to avoid the
+//! unsafety of unwinding across stack boundaries.
+//!
+//! ## Use Cases
+//!
+//! - Cryptographic routines
+//! - Secure enclave transitions
+//! 
- Sanitizing temporary buffers in high-assurance systems + +#[cfg(feature = "heap")] +pub mod heap; diff --git a/zeroize_stack/tests/zeroize_stack.rs b/zeroize_stack/tests/zeroize_stack.rs new file mode 100644 index 00000000..8671f4f5 --- /dev/null +++ b/zeroize_stack/tests/zeroize_stack.rs @@ -0,0 +1,49 @@ +//! Stack sanitization integration tests + +mod stack_sanitization_tests { + use std::panic::AssertUnwindSafe; + + use zeroize_stack::heap::{ZeroizingHeapStack, switch_stacks}; + + #[inline(never)] + fn dummy_fn() -> (*const u8, u64) { + let temporary_data = 42; + let ptr = temporary_data as *const u8; + (ptr, 12345) + } + + #[test] + fn stack_sanitization_v2() { + let mut heap_stack = ZeroizingHeapStack::new(4); + let result = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; + assert_eq!(result.1, 12345); + // results in segmentation fault, which is somewhat normal... just wanted + // to try it + // assert_eq!(unsafe {*result.0}, 42); + } + + #[test] + fn allow_stack_reuse_between_calls() { + let mut heap_stack = ZeroizingHeapStack::new(4); + let result_1 = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; + assert_eq!(result_1.1, 12345); + let result_2 = unsafe { switch_stacks(&mut heap_stack, dummy_fn) }; + assert_eq!(result_2.1, 12345); + } + + fn non_returning_function(v: &mut u32) { + *v += 5; + } + #[test] + fn non_returning_function_test() { + let mut heap_stack = ZeroizingHeapStack::new(4); + let mut v = 0; + unsafe { + switch_stacks( + &mut heap_stack, + AssertUnwindSafe(|| non_returning_function(&mut v)), + ) + }; + assert_eq!(v, 5); + } +}