//! Zeroization verification tests.
//!
//! Validates that secret key material is actually zeroed in memory after Drop
//! or explicit `.zeroize()` calls. Two techniques:
//!
//! - **`/proc/self/mem`** (Linux): reads process memory via the kernel to
//!   inspect heap allocations after Drop without invoking Rust UB. Gated with
//!   `#[cfg(all(target_os = "linux", not(miri)))]`.
//!
//! - **`std::ptr::read_volatile`**: reads stack memory after `.zeroize()` or
//!   Drop, preventing the compiler from eliding the read. Gated `#[cfg(not(miri))]`.
//!
//! These tests are NOT run under MIRI — MIRI checks memory safety (UB, use-
//! after-free), while these tests check memory *content* (secrecy). MIRI runs
//! ~105 PQ-free tests separately using the `miri` nextest profile
//! (see `.config/nextest.toml`). These zeroization tests are excluded from MIRI.
//!
//! **Key insight:** `std::mem::drop(x)` moves `x` before calling `Drop::drop`,
//! so the *original* memory location retains the secret bytes. All tests use
//! `ManuallyDrop` + `unsafe { ManuallyDrop::drop(&mut md) }` which calls
//! `Drop::drop` in-place without moving.
use soliton::identity::{GeneratedIdentity, generate_identity};
use soliton::primitives::{mlkem, xwing};
use soliton::storage::{StorageKey, StorageKeyRing};
use std::mem::ManuallyDrop;
// ──────────────────────────────────────────────────────────────────────
// Helper: read process memory via /proc/self/mem (no UB).
// ──────────────────────────────────────────────────────────────────────
|
/// Reads `len` bytes of this process's own memory starting at `ptr` via
/// `/proc/self/mem`. Going through the kernel (instead of dereferencing the
/// pointer) lets us inspect freed allocations without invoking Rust UB.
///
/// # Panics
/// Panics if procfs is unavailable, the seek fails, or the read is short —
/// all of which indicate a broken test environment rather than a test failure.
#[cfg(target_os = "linux")]
fn read_process_bytes(ptr: *const u8, len: usize) -> Vec<u8> {
    use std::io::{Read, Seek, SeekFrom};
    let mut f = std::fs::File::open("/proc/self/mem")
        .expect("failed to open /proc/self/mem (procfs required for this test)");
    // /proc/self/mem is addressed by virtual address: seek to the pointer.
    f.seek(SeekFrom::Start(ptr as u64))
        .expect("seek to target address in /proc/self/mem failed");
    let mut buf = vec![0u8; len];
    f.read_exact(&mut buf)
        .expect("short read from /proc/self/mem (unmapped region?)");
    buf
}
|
|
/// Pre-opened file handle + pre-allocated buffer for reading process memory
/// after a drop without triggering any heap allocations. This prevents the
/// allocator from reusing the just-freed region for the read buffer or file
/// handle internals, which would overwrite the zeroed memory before we check.
#[cfg(target_os = "linux")]
struct ProcMemReader {
    // Handle to /proc/self/mem, opened ahead of time (opening allocates).
    file: std::fs::File,
    // Reusable read buffer, sized once up front so `read_at` never allocates.
    buf: Vec<u8>,
}
|
|
#[cfg(target_os = "linux")]
|
|
impl ProcMemReader {
|
|
fn new(len: usize) -> Self {
|
|
Self {
|
|
file: std::fs::File::open("/proc/self/mem").unwrap(),
|
|
buf: vec![0u8; len],
|
|
}
|
|
}
|
|
|
|
fn read_at(&mut self, ptr: *const u8) -> &[u8] {
|
|
use std::io::{Read, Seek, SeekFrom};
|
|
self.file.seek(SeekFrom::Start(ptr as u64)).unwrap();
|
|
self.file.read_exact(&mut self.buf).unwrap();
|
|
&self.buf
|
|
}
|
|
}
|
|
|
|
/// After free(), glibc writes freelist metadata (fd/bk pointers, safe-linking
|
|
/// XOR'd pointers, fd_nextsize/bk_nextsize for large bins) into the beginning
|
|
/// of the freed allocation. Skip 64 bytes to cover all known glibc metadata
|
|
/// variants. For allocations > 64 bytes, the remaining region must be all zeros.
|
|
#[cfg(target_os = "linux")]
|
|
fn assert_heap_zeroed_with(reader: &mut ProcMemReader, ptr: *const u8, len: usize, label: &str) {
|
|
let after = reader.read_at(ptr);
|
|
let skip = 64.min(len);
|
|
let check = &after[skip..];
|
|
assert!(
|
|
check.iter().all(|&b| b == 0),
|
|
"{label}: found non-zero bytes after skip={skip} in {len}-byte allocation.\n\
|
|
First non-zero at offset {}: 0x{:02x}",
|
|
skip + check.iter().position(|&b| b != 0).unwrap_or(0),
|
|
check.iter().find(|&&b| b != 0).unwrap_or(&0),
|
|
);
|
|
}
|
|
|
|
/// Convenience wrapper that allocates its own reader. Use `assert_heap_zeroed_with`
|
|
/// with a pre-allocated `ProcMemReader` for flake-free post-drop checks.
|
|
#[cfg(target_os = "linux")]
|
|
fn assert_heap_zeroed(ptr: *const u8, len: usize, label: &str) {
|
|
let after = read_process_bytes(ptr, len);
|
|
let skip = 64.min(len);
|
|
let check = &after[skip..];
|
|
assert!(
|
|
check.iter().all(|&b| b == 0),
|
|
"{label}: found non-zero bytes after skip={skip} in {len}-byte allocation.\n\
|
|
First non-zero at offset {}: 0x{:02x}",
|
|
skip + check.iter().position(|&b| b != 0).unwrap_or(0),
|
|
check.iter().find(|&&b| b != 0).unwrap_or(&0),
|
|
);
|
|
}
|
|
|
|
// ──────────────────────────────────────────────────────────────────────
// Phase B: Heap zeroization — ZeroizeOnDrop types wrapping Vec<u8>
// ──────────────────────────────────────────────────────────────────────
|
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn xwing_sk_zeroized_on_drop() {
    // Heap check: the X-Wing secret key's backing bytes (minus allocator
    // metadata) must be zero after an in-place drop.
    let (_, sk) = xwing::keygen().unwrap();
    let mut md = ManuallyDrop::new(sk);
    let ptr = md.as_bytes().as_ptr();
    let len = md.as_bytes().len();
    assert_eq!(len, 2432, "unexpected xwing SK size");
    // Pre-allocate the reader before drop so the post-drop read doesn't
    // trigger heap allocations that could land on the just-freed region.
    let mut reader = ProcMemReader::new(len);
    let before = read_process_bytes(ptr, len);
    // Sanity: a fresh key must contain at least one non-zero byte.
    assert!(
        before.iter().any(|&b| b != 0),
        "xwing SK was all zeros before drop"
    );
    // ManuallyDrop::drop calls Drop::drop in-place, then Vec deallocates.
    unsafe { ManuallyDrop::drop(&mut md) };
    assert_heap_zeroed_with(&mut reader, ptr, len, "xwing::SecretKey");
}
|
|
|
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn identity_sk_zeroized_on_drop() {
    // Heap check: destructure out only the secret half of a fresh identity,
    // then verify an in-place drop leaves the backing allocation zeroed.
    let GeneratedIdentity { secret_key: sk, .. } = generate_identity().unwrap();
    let mut md = ManuallyDrop::new(sk);
    let ptr = md.as_bytes().as_ptr();
    let len = md.as_bytes().len();
    assert_eq!(len, 2496, "unexpected identity SK size");
    // Pre-allocate the reader so the post-drop read allocates nothing.
    let mut reader = ProcMemReader::new(len);
    let before = read_process_bytes(ptr, len);
    // Sanity: a fresh key must contain at least one non-zero byte.
    assert!(
        before.iter().any(|&b| b != 0),
        "identity SK was all zeros before drop"
    );
    // Drop in-place, then inspect the freed region via /proc/self/mem.
    unsafe { ManuallyDrop::drop(&mut md) };
    assert_heap_zeroed_with(&mut reader, ptr, len, "IdentitySecretKey");
}
|
|
|
#[test]
#[cfg(not(miri))]
fn xwing_shared_secret_zeroized_on_drop() {
    // SharedSecret wraps [u8; 32] — stack-allocated, not heap — so the check
    // uses read_volatile rather than /proc/self/mem.
    let (pk, _) = xwing::keygen().unwrap();
    let (_, ss) = xwing::encapsulate(&pk).unwrap();
    let mut guard = ManuallyDrop::new(ss);
    let ptr = guard.as_bytes().as_ptr() as *const [u8; 32];
    // Sanity: the secret must hold at least one non-zero byte pre-drop.
    let pre = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        pre.iter().any(|&b| b != 0),
        "xwing SS was all zeros before drop"
    );
    // Drop in-place (no move), then re-read the same stack bytes.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(
        post, [0u8; 32],
        "xwing::SharedSecret not fully zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn mlkem_shared_secret_zeroized_on_drop() {
    // SharedSecret wraps [u8; 32] — stack-allocated, not heap — so the check
    // uses read_volatile rather than /proc/self/mem.
    let (pk, _) = mlkem::keygen().unwrap();
    let (_, ss) = mlkem::encapsulate(&pk).unwrap();
    let mut guard = ManuallyDrop::new(ss);
    let ptr = guard.as_bytes().as_ptr() as *const [u8; 32];
    // Sanity: the secret must hold at least one non-zero byte pre-drop.
    let pre = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        pre.iter().any(|&b| b != 0),
        "mlkem SS was all zeros before drop"
    );
    // Drop in-place (no move), then re-read the same stack bytes.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(
        post, [0u8; 32],
        "mlkem::SharedSecret not fully zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn storage_keyring_zeroized_on_drop() {
    use soliton::primitives::random;
    // Two random keys inside the ring's HashMap; their 32-byte fields must be
    // wiped by the ring's Drop before the map's backing allocation is freed.
    let key1 = StorageKey::new(1, random::random_array()).unwrap();
    let key2 = StorageKey::new(2, random::random_array()).unwrap();
    let mut ring = StorageKeyRing::new(key1).unwrap();
    ring.add_key(key2, true).unwrap();

    // Capture pointers to individual key fields inside the HashMap's backing allocation.
    let k1_ptr = ring.get_key(1).unwrap().key().as_ptr();
    let k2_ptr = ring.get_key(2).unwrap().key().as_ptr();
    let before1 = read_process_bytes(k1_ptr, 32);
    let before2 = read_process_bytes(k2_ptr, 32);
    assert!(
        before1.iter().any(|&b| b != 0),
        "key1 was all zeros before drop"
    );
    assert!(
        before2.iter().any(|&b| b != 0),
        "key2 was all zeros before drop"
    );

    // Use ManuallyDrop to call Drop::drop in-place. StorageKeyRing::Drop
    // explicitly zeroizes each StorageKey (ZeroizeOnDrop fires on each entry's
    // fields). After HashMap deallocation, allocator metadata may overwrite
    // parts of the backing allocation, but key fields must have been zeroed
    // before dealloc.
    let mut md = ManuallyDrop::new(ring);
    unsafe { ManuallyDrop::drop(&mut md) };

    // KNOWN LIMITATION: OR assertion (not AND) — passes if *either* key slot
    // is zeroed. HashMap bucket layout is hash-dependent; one slot may coincide
    // with the region glibc overwrites with freelist metadata (fd/bk pointers,
    // safe-linking XOR'd pointers) after free(). If allocator behavior changes
    // such that both slots land in the metadata region, this test passes
    // vacuously without verifying actual zeroization. A custom allocator or
    // jemalloc-based test would close this gap but is out of scope.
    let after1 = read_process_bytes(k1_ptr, 32);
    let after2 = read_process_bytes(k2_ptr, 32);
    let k1_zero = after1.iter().all(|&b| b == 0);
    let k2_zero = after2.iter().all(|&b| b == 0);
    assert!(
        k1_zero || k2_zero,
        "StorageKeyRing: neither key was fully zeroized after drop.\n\
         key1 residue: {:?}\nkey2 residue: {:?}",
        &after1[..8],
        &after2[..8],
    );
}
|
|
|
// ──────────────────────────────────────────────────────────────────────
// Phase C: Stack/field zeroization — protocol types with manual Drop
//
// All use ManuallyDrop to ensure Drop::drop runs in-place (no move).
// ──────────────────────────────────────────────────────────────────────
|
#[test]
#[cfg(not(miri))]
fn call_keys_drop_zeroizes() {
    use soliton::call::derive_call_keys;
    // Fixed inputs — only the zeroization behavior is under test here.
    let rk = [0x01u8; 32];
    let ss = [0x02u8; 32];
    let call_id = [0x03u8; 16];
    let fp_lo = [0x00u8; 32];
    let fp_hi = [0xFFu8; 32];

    let derived = derive_call_keys(&rk, &ss, &call_id, &fp_lo, &fp_hi).unwrap();
    let mut guard = ManuallyDrop::new(derived);
    let send_ptr = guard.send_key().as_ptr() as *const [u8; 32];
    let recv_ptr = guard.recv_key().as_ptr() as *const [u8; 32];

    // Both directions must hold real key material before the drop.
    let pre_send = unsafe { std::ptr::read_volatile(send_ptr) };
    let pre_recv = unsafe { std::ptr::read_volatile(recv_ptr) };
    assert_ne!(pre_send, [0u8; 32], "send_key was zero before drop");
    assert_ne!(pre_recv, [0u8; 32], "recv_key was zero before drop");

    // Drop in-place — ZeroizeOnDrop (derived) calls Zeroize::zeroize, zeroing all fields.
    unsafe { ManuallyDrop::drop(&mut guard) };

    let post_send = unsafe { std::ptr::read_volatile(send_ptr) };
    let post_recv = unsafe { std::ptr::read_volatile(recv_ptr) };
    assert_eq!(
        post_send, [0u8; 32],
        "CallKeys::send_key not zeroized after drop"
    );
    assert_eq!(
        post_recv, [0u8; 32],
        "CallKeys::recv_key not zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn call_keys_drop_after_advance_zeroizes() {
    // Verifies ZeroizeOnDrop fires correctly on keys that have been through
    // advance(). The advance-path Copy-gap zeroization — call.rs explicitly
    // calls self.send_key.zeroize() before overwriting — is not externally
    // verifiable: the field is immediately overwritten with the new key, so the
    // old value is never observable from outside the struct. That zeroize call
    // is defense-in-depth against compiler temporaries and must be verified via
    // code review, not this test.
    use soliton::call::derive_call_keys;
    let rk = [0x01u8; 32];
    let ss = [0x02u8; 32];
    let call_id = [0x03u8; 16];
    let fp_lo = [0x00u8; 32];
    let fp_hi = [0xFFu8; 32];

    let mut keys = derive_call_keys(&rk, &ss, &call_id, &fp_lo, &fp_hi).unwrap();
    let prev_send = *keys.send_key();
    let prev_recv = *keys.recv_key();

    keys.advance().unwrap();

    // After advance, new keys must differ from the previous epoch's.
    assert_ne!(keys.send_key(), &prev_send, "advance didn't change send_key");
    assert_ne!(keys.recv_key(), &prev_recv, "advance didn't change recv_key");

    // Verify the final drop zeroizes the post-advance keys via ManuallyDrop.
    let mut guard = ManuallyDrop::new(keys);
    let send_ptr = guard.send_key().as_ptr() as *const [u8; 32];
    let recv_ptr = guard.recv_key().as_ptr() as *const [u8; 32];
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post_send = unsafe { std::ptr::read_volatile(send_ptr) };
    let post_recv = unsafe { std::ptr::read_volatile(recv_ptr) };
    assert_eq!(
        post_send, [0u8; 32],
        "CallKeys::send_key not zeroized after advance+drop"
    );
    assert_eq!(
        post_recv, [0u8; 32],
        "CallKeys::recv_key not zeroized after advance+drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn stream_encryptor_drop_zeroizes() {
    use soliton::streaming::stream_encrypt_init;

    // Known key value so the pre-drop volatile read can be checked exactly.
    let key = [0x42u8; 32];
    let enc = stream_encrypt_init(&key, b"", false).unwrap();
    let mut guard = ManuallyDrop::new(enc);
    #[allow(deprecated)]
    let key_ptr = guard.key_ptr();
    let pre = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(pre, [0x42u8; 32], "encryptor key wrong before drop");
    // Drop in-place and confirm the key field was wiped.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(
        post, [0u8; 32],
        "StreamEncryptor::key not zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn stream_decryptor_drop_zeroizes() {
    use soliton::streaming::{stream_decrypt_init, stream_encrypt_init};

    // Build a matching decryptor from a fresh encryptor's header, then verify
    // its key field is wiped by an in-place drop.
    let key = [0x42u8; 32];
    let enc = stream_encrypt_init(&key, b"", false).unwrap();
    let header = enc.header();
    let dec = stream_decrypt_init(&key, &header, b"").unwrap();
    let mut guard = ManuallyDrop::new(dec);
    #[allow(deprecated)]
    let key_ptr = guard.key_ptr();
    let pre = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(pre, [0x42u8; 32], "decryptor key wrong before drop");
    // Drop in-place and confirm the key field was wiped.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(
        post, [0u8; 32],
        "StreamDecryptor::key not zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn initiated_session_drop_zeroizes() {
    use soliton::constants;
    use soliton::kex::{PreKeyBundle, initiate_session, sign_prekey, verify_bundle};

    // Build two identities and a signed pre-key bundle for Bob, then have
    // Alice initiate a session — this yields live root/chain key material.
    let GeneratedIdentity {
        public_key: alice_pk,
        secret_key: alice_sk,
        ..
    } = generate_identity().unwrap();
    let GeneratedIdentity {
        public_key: bob_pk,
        secret_key: bob_sk,
        ..
    } = generate_identity().unwrap();
    let (spk_pk, _spk_sk) = xwing::keygen().unwrap();
    let spk_sig = sign_prekey(&bob_sk, &spk_pk).unwrap();
    let bundle = PreKeyBundle {
        ik_pub: bob_pk.clone(),
        crypto_version: constants::CRYPTO_VERSION.to_string(),
        spk_pub: spk_pk,
        spk_id: 1,
        spk_sig,
        opk_pub: None,
        opk_id: None,
    };
    let vb = verify_bundle(bundle, &bob_pk).unwrap();
    let initiated = initiate_session(&alice_pk, &alice_sk, &vb).unwrap();

    let mut md = ManuallyDrop::new(initiated);
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let ck_ptr = md.initial_chain_key_ptr();

    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    let ck_before = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert!(
        ck_before.iter().any(|&b| b != 0),
        "initial_chain_key was zero before drop"
    );

    // Drop in-place; the session's Drop must wipe both key fields.
    unsafe { ManuallyDrop::drop(&mut md) };

    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let ck_after = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "InitiatedSession::root_key not zeroized after drop"
    );
    assert_eq!(
        ck_after, [0u8; 32],
        "InitiatedSession::initial_chain_key not zeroized after drop"
    );
}
|
|
|
#[test]
#[cfg(not(miri))]
fn received_session_drop_zeroizes() {
    use soliton::constants;
    use soliton::kex::{
        PreKeyBundle, initiate_session, receive_session, sign_prekey, verify_bundle,
    };

    // Full handshake: Alice initiates against Bob's signed pre-key bundle and
    // Bob receives, producing the responder-side session whose Drop we test.
    let GeneratedIdentity {
        public_key: alice_pk,
        secret_key: alice_sk,
        ..
    } = generate_identity().unwrap();
    let GeneratedIdentity {
        public_key: bob_pk,
        secret_key: bob_sk,
        ..
    } = generate_identity().unwrap();
    let (spk_pk, spk_sk) = xwing::keygen().unwrap();
    let spk_sig = sign_prekey(&bob_sk, &spk_pk).unwrap();
    let bundle = PreKeyBundle {
        ik_pub: bob_pk.clone(),
        crypto_version: constants::CRYPTO_VERSION.to_string(),
        spk_pub: spk_pk,
        spk_id: 1,
        spk_sig,
        opk_pub: None,
        opk_id: None,
    };
    let vb = verify_bundle(bundle, &bob_pk).unwrap();
    let initiated = initiate_session(&alice_pk, &alice_sk, &vb).unwrap();

    let received = receive_session(
        &bob_pk,
        &bob_sk,
        &alice_pk,
        &initiated.session_init,
        &initiated.sender_sig,
        &spk_sk,
        None,
    )
    .unwrap();

    let mut md = ManuallyDrop::new(received);
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let ck_ptr = md.initial_chain_key_ptr();

    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    let ck_before = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert!(
        ck_before.iter().any(|&b| b != 0),
        "initial_chain_key was zero before drop"
    );

    // Drop in-place; the session's Drop must wipe both key fields.
    unsafe { ManuallyDrop::drop(&mut md) };

    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let ck_after = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "ReceivedSession::root_key not zeroized after drop"
    );
    assert_eq!(
        ck_after, [0u8; 32],
        "ReceivedSession::initial_chain_key not zeroized after drop"
    );
}
|
|
|
// ──────────────────────────────────────────────────────────────────────
// Phase D: RatchetState zeroization — memory content + observable behavior
// ──────────────────────────────────────────────────────────────────────
|
#[test]
#[cfg(not(miri))]
fn ratchet_state_drop_zeroizes_key_material() {
    // Memory-content verification for RatchetState — the most security-critical
    // type (holds root_key, send_epoch_key, recv_epoch_key). Uses the same
    // ManuallyDrop + read_volatile pattern as other secret-key types to verify
    // that Drop::drop (which delegates to reset()) actually zeros in-place.
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let rk = [0x11u8; 32];
    let ck = [0x22u8; 32];
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    let alice =
        soliton::ratchet::RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk, ek_sk).unwrap();

    let mut md = ManuallyDrop::new(alice);
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let sek_ptr = md.send_epoch_key_ptr();
    #[allow(deprecated)]
    let rek_ptr = md.recv_epoch_key_ptr();

    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let sek_before = unsafe { std::ptr::read_volatile(sek_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    // send_epoch_key is the epoch_key passed to init_alice (non-zero).
    assert!(
        sek_before.iter().any(|&b| b != 0),
        "send_epoch_key was zero before drop"
    );
    // recv_epoch_key is initialized to [0u8; 32] for Alice (Bob hasn't sent yet),
    // so skip the non-zero pre-check for it — verify zeroization nonetheless.

    unsafe { ManuallyDrop::drop(&mut md) };

    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let sek_after = unsafe { std::ptr::read_volatile(sek_ptr as *const [u8; 32]) };
    let rek_after = unsafe { std::ptr::read_volatile(rek_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "RatchetState::root_key not zeroized after drop"
    );
    assert_eq!(
        sek_after, [0u8; 32],
        "RatchetState::send_epoch_key not zeroized after drop"
    );
    assert_eq!(
        rek_after, [0u8; 32],
        "RatchetState::recv_epoch_key not zeroized after drop"
    );
}
|
|
|
/// Helper: create a ratchet pair for testing.
|
|
fn make_ratchet_pair() -> (
|
|
soliton::ratchet::RatchetState,
|
|
soliton::ratchet::RatchetState,
|
|
[u8; 32],
|
|
[u8; 32],
|
|
) {
|
|
use soliton::primitives::random;
|
|
let (ek_pk, ek_sk) = xwing::keygen().unwrap();
|
|
let rk: [u8; 32] = random::random_array();
|
|
let ck: [u8; 32] = random::random_array();
|
|
let fp_a = [0xAAu8; 32];
|
|
let fp_b = [0xBBu8; 32];
|
|
let alice =
|
|
soliton::ratchet::RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk.clone(), ek_sk)
|
|
.unwrap();
|
|
let bob = soliton::ratchet::RatchetState::init_bob(rk, ck, fp_b, fp_a, ek_pk).unwrap();
|
|
(alice, bob, fp_a, fp_b)
|
|
}
|
|
|
|
#[test]
fn ratchet_reset_then_encrypt_fails() {
    // Encryption must succeed on a live ratchet and fail once reset() has
    // wiped its state.
    let (mut alice, _bob, _fp_a, _fp_b) = make_ratchet_pair();
    assert!(alice.encrypt(b"test").is_ok());
    alice.reset();
    assert!(alice.encrypt(b"test").is_err());
}
|
|
|
#[test]
fn ratchet_recv_seen_cleared_on_reset() {
    let (mut alice, mut bob, _fp_a, _fp_b) = make_ratchet_pair();
    // Bob receives msg2 out of order — recv_seen tracks the counter.
    let _skipped0 = alice.encrypt(b"msg0").unwrap();
    let _skipped1 = alice.encrypt(b"msg1").unwrap();
    let third = alice.encrypt(b"msg2").unwrap();
    bob.decrypt(&third.header, &third.ciphertext).unwrap();
    bob.reset();
    // After reset, recv_seen and all epoch keys should be cleared; the state
    // is unusable (root_key zeroed).
    assert!(bob.encrypt(b"test").is_err());
}
|
|
|
#[test]
fn ratchet_aead_failure_no_state_leak() {
    // A tampered ciphertext must fail without corrupting ratchet state: the
    // untouched original message still decrypts afterwards.
    let (mut alice, mut bob, _fp_a, _fp_b) = make_ratchet_pair();
    let enc = alice.encrypt(b"good message").unwrap();

    let mut tampered = enc.ciphertext.clone();
    tampered[0] ^= 0xFF;

    assert!(bob.decrypt(&enc.header, &tampered).is_err());

    // State rolled back — valid message still works.
    let plaintext = bob.decrypt(&enc.header, &enc.ciphertext).unwrap();
    assert_eq!(&*plaintext, b"good message");
}
|
|
|
// ──────────────────────────────────────────────────────────────────────
// Phase E: Foundational sanity checks
// ──────────────────────────────────────────────────────────────────────
|
#[test]
#[cfg(not(miri))]
fn zeroizing_array_drop_zeros() {
    // Dependency regression canary: if the `zeroize` crate ever ships a version
    // that doesn't actually zero on Drop (optimizer regression, feature-gate
    // change, etc.), this test catches it before any higher-level zeroization
    // test can be affected. All other tests in this file depend on Zeroizing
    // working correctly — this one verifies that assumption directly.
    use zeroize::Zeroizing;
    // ManuallyDrop avoids the move inside std::mem::drop.
    let mut guard = ManuallyDrop::new(Zeroizing::new([0xAAu8; 32]));
    let ptr = guard.as_ptr() as *const [u8; 32];
    let pre = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(pre, [0xAAu8; 32]);
    // Drop in-place — Zeroizing's Drop zeros the inner value.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(
        post, [0u8; 32],
        "Zeroizing<[u8; 32]> not zeroed after drop"
    );
}
|
|
|
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn zeroizing_vec_drop_zeros() {
    // Heap-side canary: Zeroizing<Vec<u8>> must wipe the heap buffer on Drop.
    use zeroize::Zeroizing;
    let secret = Zeroizing::new(vec![0xBBu8; 256]);
    let mut md = ManuallyDrop::new(secret);
    let ptr = md.as_ptr();
    let len = md.len();
    // Confirm the buffer holds the expected pattern before the drop.
    let before = read_process_bytes(ptr, len);
    assert!(before.iter().all(|&b| b == 0xBB));
    // Drop in-place, then inspect the freed region (skipping allocator metadata).
    unsafe { ManuallyDrop::drop(&mut md) };
    assert_heap_zeroed(ptr, len, "Zeroizing<Vec<u8>>");
}
|
|
|
#[test]
#[cfg(not(miri))]
fn storage_key_zeroized_on_drop() {
    use soliton::primitives::random;
    // A single StorageKey's 32-byte material must be wiped by its Drop impl.
    let fresh = StorageKey::new(1, random::random_array()).unwrap();
    let mut guard = ManuallyDrop::new(fresh);
    let ptr = guard.key().as_ptr() as *const [u8; 32];
    let pre = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        pre.iter().any(|&b| b != 0),
        "StorageKey was zero before drop"
    );
    // Drop in-place, then re-read the same bytes.
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(post, [0u8; 32], "StorageKey::key not zeroized after drop");
}
|
|
|
// ──────────────────────────────────────────────────────────────────────
// Phase F: Copy-gap pattern validation
// ──────────────────────────────────────────────────────────────────────
|
#[test]
#[cfg(not(miri))]
fn copy_gap_pattern_validated() {
    use zeroize::Zeroize;
    // Demonstrates the Copy-gap and its mitigation: copying a secret array
    // into Zeroizing leaves the source bytes behind until explicitly wiped.

    // 1. A secret [u8; 32] value on the stack.
    let mut source = [0xCCu8; 32];
    let source_ptr = source.as_ptr() as *const [u8; 32];

    // 2. Copy into Zeroizing (Copy semantics — the source is unchanged).
    let wrapped = zeroize::Zeroizing::new(source);

    // The source still holds 0xCC — this is the "copy gap".
    let gap = unsafe { std::ptr::read_volatile(source_ptr) };
    assert_eq!(
        gap,
        [0xCCu8; 32],
        "original should still hold secret after copy into Zeroizing",
    );

    // 3. Explicitly zeroize the source (as the codebase does).
    source.zeroize();
    let wiped = unsafe { std::ptr::read_volatile(source_ptr) };
    assert_eq!(
        wiped,
        [0u8; 32],
        "original not zeroized after explicit .zeroize()",
    );

    // 4. The Zeroizing wrapper still holds the value.
    assert_eq!(*wrapped, [0xCCu8; 32]);

    // 5. Drop the wrapper in-place via ManuallyDrop (no move) and confirm
    //    the wrapper's own storage is zeroed too.
    let mut guard = ManuallyDrop::new(wrapped);
    let wrapped_ptr = guard.as_ptr() as *const [u8; 32];
    unsafe { ManuallyDrop::drop(&mut guard) };
    let post = unsafe { std::ptr::read_volatile(wrapped_ptr) };
    assert_eq!(post, [0u8; 32], "Zeroizing wrapper not zeroed after drop");
}