initial commit
Some checks failed
CI / lint (push) Successful in 1m37s
CI / test-python (push) Successful in 1m49s
CI / test-zig (push) Successful in 1m39s
CI / test-wasm (push) Successful in 1m54s
CI / test (push) Successful in 14m44s
CI / miri (push) Successful in 14m18s
CI / build (push) Successful in 1m9s
CI / fuzz-regression (push) Successful in 9m9s
CI / publish (push) Failing after 1m10s
CI / publish-python (push) Failing after 1m46s
CI / publish-wasm (push) Has been cancelled

Signed-off-by: Kamal Tufekcic <kamal@lo.sh>
This commit is contained in:
Kamal Tufekcic 2026-04-02 23:48:10 +03:00
commit 1d99048c95
No known key found for this signature in database
165830 changed files with 79062 additions and 0 deletions

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,172 @@
//! Integration tests for Argon2id key derivation.
//!
//! Exercises the full passphrase → key → encrypt/decrypt cycle that matches
//! the primary use case: protecting identity keypairs with a user passphrase.
use soliton::primitives::aead::{aead_decrypt, aead_encrypt};
use soliton::primitives::argon2::{Argon2Params, argon2id};
use soliton::primitives::random::random_array;
/// Minimal-cost Argon2id parameters so tests run fast — never use in production.
///
/// Memory cost 8 KiB (the Argon2 minimum), a single pass, one lane.
const FAST: Argon2Params = Argon2Params {
    p_cost: 1,
    t_cost: 1,
    m_cost: 8,
};
/// Round-trip: derive a 32-byte key from a passphrase, encrypt a payload,
/// re-derive the key from the same inputs, and decrypt successfully.
#[test]
fn passphrase_protects_keypair() {
    let pass = b"correct horse battery staple";
    let salt: [u8; 16] = random_array();
    let nonce: [u8; 24] = random_array();
    // Stand-in for stored identity keypair material.
    let secret_blob = b"secret identity keypair bytes (not real, just for test)";
    // First derivation: the encryption key.
    let mut k1 = [0u8; 32];
    argon2id(pass, &salt, FAST, &mut k1).unwrap();
    let ct = aead_encrypt(&k1, &nonce, secret_blob, b"").unwrap();
    // Second derivation with identical passphrase + salt must yield the same key.
    let mut k2 = [0u8; 32];
    argon2id(pass, &salt, FAST, &mut k2).unwrap();
    assert_eq!(k1, k2);
    // The re-derived key decrypts the payload back to the original bytes.
    let pt = aead_decrypt(&k2, &nonce, &ct, b"").unwrap();
    assert_eq!(&*pt, secret_blob.as_slice());
}
/// Deriving with a different passphrase yields a different key, so AEAD
/// authentication — and therefore decryption — must fail.
#[test]
fn wrong_passphrase_fails_decryption() {
    let salt: [u8; 16] = random_array();
    let nonce: [u8; 24] = random_array();
    let mut good_key = [0u8; 32];
    argon2id(b"correct passphrase", &salt, FAST, &mut good_key).unwrap();
    let ct = aead_encrypt(&good_key, &nonce, b"secret keypair", b"").unwrap();
    let mut bad_key = [0u8; 32];
    argon2id(b"wrong passphrase", &salt, FAST, &mut bad_key).unwrap();
    let attempt = aead_decrypt(&bad_key, &nonce, &ct, b"");
    assert!(attempt.is_err(), "decryption with wrong passphrase must fail");
}
/// Different salts (e.g. from a different device or key slot) derive
/// different keys, so AEAD authentication must fail.
#[test]
fn wrong_salt_fails_decryption() {
    let salt_a = [0x11u8; 16];
    let salt_b = [0x22u8; 16];
    let nonce: [u8; 24] = random_array();
    let mut key_a = [0u8; 32];
    argon2id(b"passphrase", &salt_a, FAST, &mut key_a).unwrap();
    let ct = aead_encrypt(&key_a, &nonce, b"secret keypair", b"").unwrap();
    let mut key_b = [0u8; 32];
    argon2id(b"passphrase", &salt_b, FAST, &mut key_b).unwrap();
    let attempt = aead_decrypt(&key_b, &nonce, &ct, b"");
    assert!(attempt.is_err(), "decryption with different salt must fail");
}
/// `b""` is valid input: the Argon2 spec permits zero-length passwords, and
/// the output must be deterministic and distinct from non-empty inputs.
#[test]
fn empty_passphrase_is_accepted() {
    let salt: [u8; 16] = [0x01u8; 16];
    let mut first = [0u8; 32];
    let mut second = [0u8; 32];
    // Must not error — empty passwords are explicitly allowed by the spec.
    argon2id(b"", &salt, FAST, &mut first).expect("empty passphrase must be accepted");
    argon2id(b"", &salt, FAST, &mut second).unwrap();
    // Same inputs, same output.
    assert_eq!(
        first, second,
        "empty passphrase must produce deterministic output"
    );
    // And it must not collide with a non-empty passphrase.
    let mut nonempty = [0u8; 32];
    argon2id(b"x", &salt, FAST, &mut nonempty).unwrap();
    assert_ne!(
        first, nonempty,
        "empty and non-empty passphrases must produce different keys"
    );
}
/// A salt below Argon2's 8-byte minimum must surface a structured error
/// rather than panicking or silently producing key material.
#[test]
fn short_salt_returns_error() {
    use soliton::error::Error;
    let mut out = [0u8; 32];
    // 5-byte salt: below the 8-byte minimum.
    let result = argon2id(b"passphrase", b"short", FAST, &mut out);
    let is_expected = matches!(
        result,
        Err(Error::InvalidLength {
            expected: 8,
            got: 5
        })
    );
    assert!(
        is_expected,
        "expected InvalidLength {{ expected: 8, got: 5 }}, got: {:?}",
        result,
    );
}
/// Verifies that `Argon2Params::RECOMMENDED` compiles and produces non-zero
/// output. (`OWASP_MIN` is covered separately by
/// `owasp_min_params_produce_output`; the old doc wrongly claimed both were
/// exercised here.)
///
/// Uses real cost parameters and runs slower than other tests (~0.5-2 s
/// depending on hardware). The `#[ignore]` attribute keeps it out of
/// `cargo test` by default; run with `cargo test -- --ignored` to execute.
#[test]
#[ignore = "slow: uses production-grade Argon2 parameters (~0.5-2 s)"]
fn recommended_params_produce_output() {
    let mut out = [0u8; 32];
    argon2id(
        b"passphrase",
        b"saltsaltsaltsalt",
        Argon2Params::RECOMMENDED,
        &mut out,
    )
    .unwrap();
    // All-zero output would indicate the derivation silently did nothing.
    assert!(out.iter().any(|&b| b != 0));
}
/// `Argon2Params::OWASP_MIN` (19 MiB, t=2, p=1) — the absolute minimum
/// recommended for new applications — must be accepted by the Argon2id
/// implementation and produce non-zero output, confirming the preset is
/// correctly defined.
///
/// `#[ignore]`d to stay out of normal CI; run with `cargo test -- --ignored`.
#[test]
#[ignore = "slow: uses OWASP minimum Argon2 parameters (~19 MiB, t=2, ~0.5 s)"]
fn owasp_min_params_produce_output() {
    let mut derived = [0u8; 32];
    argon2id(
        b"passphrase",
        b"saltsaltsaltsalt",
        Argon2Params::OWASP_MIN,
        &mut derived,
    )
    .unwrap();
    // A derivation that ran must leave at least one non-zero byte.
    assert!(derived.iter().any(|&b| b != 0));
}

View file

@ -0,0 +1,758 @@
#![allow(deprecated)] // Tests exercise from_bytes directly for parser coverage.
use soliton::constants;
use soliton::identity::{
GeneratedIdentity, IdentityPublicKey, IdentitySecretKey, generate_identity,
};
use soliton::kex::{
PreKeyBundle, build_first_message_aad, initiate_session, receive_session, sign_prekey,
verify_bundle,
};
use soliton::primitives::xwing;
use soliton::ratchet::RatchetState;
/// Build one test peer: identity keypair, signed pre-key pair, and the
/// corresponding `PreKeyBundle` (no one-time pre-key attached).
fn setup_peer() -> (
    IdentityPublicKey,
    IdentitySecretKey,
    xwing::PublicKey,
    xwing::SecretKey,
    PreKeyBundle,
) {
    let identity = generate_identity().unwrap();
    let GeneratedIdentity {
        public_key: ik_pk,
        secret_key: ik_sk,
        ..
    } = identity;
    // Signed pre-key: fresh X-Wing pair signed by the identity key.
    let (spk_pk, spk_sk) = xwing::keygen().unwrap();
    let spk_sig = sign_prekey(&ik_sk, &spk_pk).unwrap();
    let bundle = PreKeyBundle {
        crypto_version: constants::CRYPTO_VERSION.to_string(),
        ik_pub: IdentityPublicKey::from_bytes(ik_pk.as_bytes().to_vec()).unwrap(),
        spk_pub: spk_pk.clone(),
        spk_id: 1,
        spk_sig,
        opk_pub: None,
        opk_id: None,
    };
    (ik_pk, ik_sk, spk_pk, spk_sk, bundle)
}
/// Run a full KEX and return initialized ratchet states + fingerprints.
///
/// `with_opk` selects whether Bob's bundle carries a one-time pre-key.
/// Statement order here mirrors the wire protocol and must not change:
/// verify bundle → initiate → first-message AAD → encrypt first message →
/// receive session → decrypt first message → init both ratchets.
fn do_kex(with_opk: bool) -> (RatchetState, RatchetState, [u8; 32], [u8; 32]) {
    let (alice_ik_pk, alice_ik_sk, _alice_spk_pk, _alice_spk_sk, _alice_bundle) = setup_peer();
    let (bob_ik_pk, bob_ik_sk, _bob_spk_pk, bob_spk_sk, mut bob_bundle) = setup_peer();
    // Optionally attach a one-time pre-key to Bob's bundle; the matching
    // secret is handed to receive_session below.
    let opk_sk = if with_opk {
        let (pk, sk) = xwing::keygen().unwrap();
        bob_bundle.opk_pub = Some(pk);
        bob_bundle.opk_id = Some(42);
        Some(sk)
    } else {
        None
    };
    let fp_a = alice_ik_pk.fingerprint_raw();
    let fp_b = bob_ik_pk.fingerprint_raw();
    // Alice verifies Bob's bundle and initiates session.
    let verified = verify_bundle(bob_bundle, &bob_ik_pk).unwrap();
    let mut initiated = initiate_session(&alice_ik_pk, &alice_ik_sk, &verified).unwrap();
    // Build first message AAD (binds both fingerprints plus the session init).
    let aad = build_first_message_aad(&fp_a, &fp_b, &initiated.session_init).unwrap();
    // Alice encrypts first message, consuming the initial chain key.
    let (first_ct, alice_ck) =
        RatchetState::encrypt_first_message(initiated.take_initial_chain_key(), b"hello bob", &aad)
            .unwrap();
    // Bob receives the session — sender_sig proves Alice initiated (not an impersonator).
    let mut received = receive_session(
        &bob_ik_pk,
        &bob_ik_sk,
        &alice_ik_pk,
        &initiated.session_init,
        &initiated.sender_sig,
        &bob_spk_sk,
        opk_sk.as_ref(),
    )
    .unwrap();
    // Bob decrypts the first message, consuming the initial chain key.
    let (first_pt, bob_ck) =
        RatchetState::decrypt_first_message(received.take_initial_chain_key(), &first_ct, &aad)
            .unwrap();
    assert_eq!(&*first_pt, b"hello bob");
    // Chain keys must match after first message.
    assert_eq!(*alice_ck, *bob_ck);
    // Extract keys via take_ methods — each replaces the internal value with
    // zeros, enforcing single-use. ek_pk/peer_ek are pub (non-secret).
    let ek_pk = xwing::PublicKey::from_bytes(initiated.ek_pk.as_bytes().to_vec()).unwrap();
    let ek_sk = xwing::SecretKey::from_bytes(initiated.ek_sk().as_bytes().to_vec()).unwrap();
    let peer_ek = xwing::PublicKey::from_bytes(received.peer_ek.as_bytes().to_vec()).unwrap();
    // Note the fingerprint ordering: Alice passes (local=fp_a, remote=fp_b),
    // Bob passes (local=fp_b, remote=fp_a).
    let alice = RatchetState::init_alice(
        *initiated.take_root_key(),
        *alice_ck,
        fp_a,
        fp_b,
        ek_pk,
        ek_sk,
    )
    .unwrap();
    let bob =
        RatchetState::init_bob(*received.take_root_key(), *bob_ck, fp_b, fp_a, peer_ek).unwrap();
    (alice, bob, fp_a, fp_b)
}
/// Helper: encrypt `msg` on Alice's ratchet, decrypt on Bob's, return the plaintext.
fn send_a_to_b(alice: &mut RatchetState, bob: &mut RatchetState, msg: &[u8]) -> Vec<u8> {
    let sealed = alice.encrypt(msg).unwrap();
    let opened = bob.decrypt(&sealed.header, &sealed.ciphertext).unwrap();
    opened.to_vec()
}
/// Helper: encrypt `msg` on Bob's ratchet, decrypt on Alice's, return the plaintext.
fn send_b_to_a(alice: &mut RatchetState, bob: &mut RatchetState, msg: &[u8]) -> Vec<u8> {
    let sealed = bob.encrypt(msg).unwrap();
    let opened = alice.decrypt(&sealed.header, &sealed.ciphertext).unwrap();
    opened.to_vec()
}
/// KEX → message exchange → serialize → deserialize → keep talking.
#[test]
fn full_session_lifecycle() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // A message in each direction.
    assert_eq!(send_a_to_b(&mut alice, &mut bob, b"msg1"), b"msg1");
    assert_eq!(send_b_to_a(&mut alice, &mut bob, b"msg2"), b"msg2");
    // Round-trip both states through serialization.
    let a_bytes = alice.to_bytes().unwrap().0;
    let b_bytes = bob.to_bytes().unwrap().0;
    let mut alice = RatchetState::from_bytes(&a_bytes).unwrap();
    let mut bob = RatchetState::from_bytes(&b_bytes).unwrap();
    // The restored states keep the conversation going.
    assert_eq!(
        send_a_to_b(&mut alice, &mut bob, b"post-restore"),
        b"post-restore"
    );
}
/// Basic two-way exchange over a session established WITH a one-time pre-key.
#[test]
fn basic_session_with_opk_enabled() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    assert_eq!(send_a_to_b(&mut alice, &mut bob, b"with opk"), b"with opk");
    assert_eq!(send_b_to_a(&mut alice, &mut bob, b"reply"), b"reply");
}
/// Basic two-way exchange over a session established WITHOUT a one-time pre-key.
#[test]
fn full_session_without_opk() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(false);
    assert_eq!(send_a_to_b(&mut alice, &mut bob, b"no opk"), b"no opk");
    assert_eq!(send_b_to_a(&mut alice, &mut bob, b"reply"), b"reply");
}
/// Alternate the sending direction every message; each turn-around drives a
/// DH ratchet step on both sides.
#[test]
fn bidirectional_ratchet() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    for i in 0..10u32 {
        let alice_turn = i % 2 == 0;
        let msg = if alice_turn {
            format!("a-to-b #{i}")
        } else {
            format!("b-to-a #{i}")
        };
        let pt = if alice_turn {
            send_a_to_b(&mut alice, &mut bob, msg.as_bytes())
        } else {
            send_b_to_a(&mut alice, &mut bob, msg.as_bytes())
        };
        assert_eq!(pt, msg.as_bytes());
    }
}
/// Skipped messages from an earlier epoch still decrypt after a ratchet
/// step, via the prev_recv_epoch_key grace period.
#[test]
fn out_of_order_across_ratchet_steps() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Three messages from Alice within one epoch.
    let c0 = alice.encrypt(b"msg0").unwrap();
    let c1 = alice.encrypt(b"msg1").unwrap();
    let c2 = alice.encrypt(b"msg2").unwrap();
    // The last one arrives first: Bob skips counters 0 and 1.
    let pt2 = bob.decrypt(&c2.header, &c2.ciphertext).unwrap();
    assert_eq!(&*pt2, b"msg2");
    // Bob replies; when Alice processes it, both sides ratchet forward.
    let reply = bob.encrypt(b"bob reply").unwrap();
    let pt_reply = alice.decrypt(&reply.header, &reply.ciphertext).unwrap();
    assert_eq!(&*pt_reply, b"bob reply");
    // The stragglers still decrypt thanks to the one-epoch grace period.
    let pt0 = bob.decrypt(&c0.header, &c0.ciphertext).unwrap();
    assert_eq!(&*pt0, b"msg0");
    let pt1 = bob.decrypt(&c1.header, &c1.ciphertext).unwrap();
    assert_eq!(&*pt1, b"msg1");
}
/// Pending out-of-order gaps survive a serialization round-trip and can be
/// consumed afterwards via counter-mode key derivation.
#[test]
fn serialize_with_out_of_order_then_consume() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    let c0 = alice.encrypt(b"msg0").unwrap();
    let c1 = alice.encrypt(b"msg1").unwrap();
    let c2 = alice.encrypt(b"msg2").unwrap();
    // Only the last message is delivered before the snapshot.
    bob.decrypt(&c2.header, &c2.ciphertext).unwrap();
    // Snapshot and restore Bob.
    let snapshot = bob.to_bytes().unwrap().0;
    let mut restored = RatchetState::from_bytes(&snapshot).unwrap();
    // The restored state still derives keys for the skipped counters.
    let pt0 = restored.decrypt(&c0.header, &c0.ciphertext).unwrap();
    assert_eq!(&*pt0, b"msg0");
    let pt1 = restored.decrypt(&c1.header, &c1.ciphertext).unwrap();
    assert_eq!(&*pt1, b"msg1");
}
/// Resetting wipes both states (encrypt must fail afterwards); a fresh KEX
/// establishes a fully working replacement session.
#[test]
fn session_reset_reestablishment() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    send_a_to_b(&mut alice, &mut bob, b"before reset");
    // Wipe both sides.
    alice.reset();
    bob.reset();
    // The zeroed states must refuse to encrypt.
    assert!(alice.encrypt(b"test").is_err());
    assert!(bob.encrypt(b"test").is_err());
    // Establish a completely new session.
    let (mut alice2, mut bob2, _fp_a2, _fp_b2) = do_kex(false);
    assert_eq!(
        send_a_to_b(&mut alice2, &mut bob2, b"new session"),
        b"new session"
    );
}
/// 120 messages with a 3:2 direction mix, with a serialization round-trip of
/// both sides in the middle of the conversation.
#[test]
fn long_conversation_stress() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    for i in 0..120u32 {
        // Three messages Alice→Bob, then two Bob→Alice, repeating.
        let payload = i.to_be_bytes();
        let pt = match i % 5 {
            0..=2 => send_a_to_b(&mut alice, &mut bob, &payload),
            _ => send_b_to_a(&mut alice, &mut bob, &payload),
        };
        // Serialize mid-conversation.
        if i == 60 {
            let a_bytes = alice.to_bytes().unwrap().0;
            let b_bytes = bob.to_bytes().unwrap().0;
            alice = RatchetState::from_bytes(&a_bytes).unwrap();
            bob = RatchetState::from_bytes(&b_bytes).unwrap();
        }
        // First message after the round-trip: check plaintext content, not
        // just AEAD success, to confirm state survives serialization intact.
        if i == 61 {
            assert_eq!(
                pt,
                (61u32).to_be_bytes(),
                "plaintext must survive serialization round-trip"
            );
        }
    }
}
/// Checks `RatchetState::derive_call_keys` two ways: (1) the two parties'
/// directional keys mirror each other, and (2) the method delegates to
/// `call::derive_call_keys` over the shared root key and stored fingerprints.
#[test]
fn ratchet_derive_call_keys_delegates() {
    let (alice, bob, fp_a, fp_b) = do_kex(true);
    let kem_ss: [u8; 32] = soliton::primitives::random::random_array();
    let call_id: [u8; 16] = soliton::primitives::random::random_array();
    // Cross-party agreement: alice's send == bob's recv and vice versa.
    let alice_keys = alice.derive_call_keys(&kem_ss, &call_id).unwrap();
    let bob_keys = bob.derive_call_keys(&kem_ss, &call_id).unwrap();
    assert_eq!(alice_keys.send_key(), bob_keys.recv_key());
    assert_eq!(alice_keys.recv_key(), bob_keys.send_key());
    // White-box: verify RatchetState::derive_call_keys truly delegates to
    // call::derive_call_keys with self.root_key and stored fingerprints.
    // Both parties share the same root key after KEX, so verify with Alice's side.
    // root_key_bytes is deprecated test-only introspection; see file-level allow.
    #[allow(deprecated)]
    let root_key = alice.root_key_bytes();
    let direct =
        soliton::call::derive_call_keys(root_key, &kem_ss, &call_id, &fp_a, &fp_b).unwrap();
    assert_eq!(alice_keys.send_key(), direct.send_key());
    assert_eq!(alice_keys.recv_key(), direct.recv_key());
}
/// Fingerprints are bound at init time. If the two sides disagree on
/// local/remote ordering, the AADs differ and decryption fails with
/// `AeadFailed`.
#[test]
fn ratchet_mismatched_fingerprints_returns_aead_failed() {
    use soliton::error::Error;
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let rk = [0x11u8; 32];
    let ck = [0x22u8; 32];
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    // Alice is set up correctly: local = fp_a, remote = fp_b.
    let mut alice = RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk.clone(), ek_sk).unwrap();
    // Bob gets the SAME ordering — which for him means local/remote swapped.
    let mut bad_bob = RatchetState::init_bob(rk, ck, fp_a, fp_b, ek_pk).unwrap();
    let sealed = alice.encrypt(b"secret").unwrap();
    // Alice's AAD is fp_a || fp_b; bad Bob reconstructs fp_b || fp_a, so the
    // AEAD tag cannot authenticate.
    let outcome = bad_bob.decrypt(&sealed.header, &sealed.ciphertext);
    assert!(
        matches!(outcome, Err(Error::AeadFailed)),
        "mismatched fingerprint ordering must return AeadFailed, got: {:?}",
        outcome,
    );
}
/// Abstract.md Theorem 12: a ciphertext Alice produced must never decrypt
/// when fed back to her as if Bob had sent it. Encrypt AAD is
/// local_fp || remote_fp while decrypt expects remote_fp || local_fp, so
/// the AEAD tag cannot authenticate. Depending on ratchet state the failure
/// may surface earlier (e.g. InvalidData from KEM decapsulation against the
/// wrong key) — the property under test is only that it never succeeds.
#[test]
fn reflection_attack_rejected() {
    let (mut alice, _bob, _fp_a, _fp_b) = do_kex(true);
    let sealed = alice.encrypt(b"hello bob").unwrap();
    // Feed Alice's own ciphertext back to her (reflection).
    let reflected = alice.decrypt(&sealed.header, &sealed.ciphertext);
    assert!(
        reflected.is_err(),
        "reflected ciphertext must be rejected, got: Ok(...)",
    );
}
/// Bob comes online and transmits before any of Alice's traffic reaches him
/// — the realistic network-delay scenario. Exercises init_bob → immediate
/// encrypt with ratchet_pending=true and recv_ratchet_pk = pk_EK from KEX.
#[test]
fn bob_sends_before_receiving() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Two in-flight messages from Alice that Bob has not seen yet.
    let in_flight_0 = alice.encrypt(b"alice msg 0").unwrap();
    let in_flight_1 = alice.encrypt(b"alice msg 1").unwrap();
    // Bob speaks first — a KEM ratchet from his pending state.
    assert_eq!(send_b_to_a(&mut alice, &mut bob, b"bob first"), b"bob first");
    // The delayed messages still decrypt: both sides start counters at 1
    // post-KEX, so the first one matches Bob's recv_count with no skipping.
    let pt0 = bob
        .decrypt(&in_flight_0.header, &in_flight_0.ciphertext)
        .unwrap();
    assert_eq!(&*pt0, b"alice msg 0");
    let pt1 = bob
        .decrypt(&in_flight_1.header, &in_flight_1.ciphertext)
        .unwrap();
    assert_eq!(&*pt1, b"alice msg 1");
    // Normal flow resumes.
    assert_eq!(
        send_a_to_b(&mut alice, &mut bob, b"after catch-up"),
        b"after catch-up"
    );
}
/// Alice's fresh state has recv_ratchet_pk = None, so any incoming
/// ratchet_pk signals a new epoch (KEM ratchet step). A new-epoch header
/// that carries no KEM ciphertext cannot complete the step → InvalidData.
#[test]
fn ratchet_new_epoch_without_kem_ct_returns_invalid_data() {
    use soliton::error::Error;
    use soliton::ratchet::RatchetHeader;
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let rk = [0x11u8; 32];
    let ck = [0x22u8; 32];
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    let mut alice = RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk, ek_sk).unwrap();
    // Structurally valid 1216-byte X-Wing public key, but no kem_ct alongside it.
    let dummy_pk = xwing::PublicKey::from_bytes(vec![0u8; 1216]).unwrap();
    let header = RatchetHeader {
        ratchet_pk: dummy_pk,
        kem_ct: None,
        n: 0,
        pn: 1,
    };
    let outcome = alice.decrypt(&header, &[]);
    assert!(
        matches!(outcome, Err(Error::InvalidData)),
        "new-epoch message without kem_ct must return InvalidData, got: {:?}",
        outcome,
    );
}
// ── Security property tests (RT-540, RT-541, RT-542) ──────────────────
/// RT-540: After a KEM ratchet step, message keys from the old epoch
/// must be unrecoverable. Verify by capturing a ciphertext, advancing
/// the ratchet through two full steps, then confirming the old ciphertext
/// cannot be replayed against the new state.
#[test]
fn forward_secrecy_old_epoch_keys_unrecoverable() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Epoch 0: Alice sends, Bob receives — enc0 is the capture target.
    let enc0 = alice.encrypt(b"epoch0 msg").unwrap();
    bob.decrypt(&enc0.header, &enc0.ciphertext).unwrap();
    // Trigger KEM ratchet: Bob replies, Alice receives → new epoch.
    let enc_b = bob.encrypt(b"reply").unwrap();
    alice.decrypt(&enc_b.header, &enc_b.ciphertext).unwrap();
    // Another ratchet step: Alice sends again → epoch advances again.
    let enc_a2 = alice.encrypt(b"epoch2 msg").unwrap();
    bob.decrypt(&enc_a2.header, &enc_a2.ciphertext).unwrap();
    // Bob replies again → second ratchet step. prev_recv_epoch_key from
    // epoch 0 is now gone (only one-epoch grace period).
    let enc_b2 = bob.encrypt(b"reply2").unwrap();
    alice.decrypt(&enc_b2.header, &enc_b2.ciphertext).unwrap();
    // Serialize Bob to get a snapshot of current state.
    let bob_bytes = bob.to_bytes().unwrap().0;
    let mut bob_replay = RatchetState::from_bytes(&bob_bytes).unwrap();
    // Attempt to decrypt the epoch-0 message — must fail. The epoch-0
    // epoch key has been overwritten by two subsequent KEM ratchet steps.
    // The replayed message's ratchet_pk matches prev_recv_ratchet_pk
    // (one-epoch grace period), so AEAD succeeds with the retained key.
    // Counter n=0 is already in prev_recv_seen → DuplicateMessage.
    let result = bob_replay.decrypt(&enc0.header, &enc0.ciphertext);
    assert!(
        matches!(result, Err(soliton::error::Error::DuplicateMessage)),
        "old epoch replay must be caught as duplicate"
    );
}
/// RT-541: Two independent KEX runs between the same identities must yield
/// unrelated key material — no cross-session decryption.
#[test]
fn cross_session_isolation() {
    let (mut alice1, mut bob1, _fp_a1, _fp_b1) = do_kex(true);
    let (mut alice2, mut bob2, _fp_a2, _fp_b2) = do_kex(true);
    // Same plaintext through both sessions.
    let sealed1 = alice1.encrypt(b"same plaintext").unwrap();
    let sealed2 = alice2.encrypt(b"same plaintext").unwrap();
    // Independent ratchet states ⇒ different keys and nonces ⇒ different bytes.
    assert_ne!(
        sealed1.ciphertext, sealed2.ciphertext,
        "two sessions must produce different ciphertexts for the same plaintext"
    );
    // Session 1 material is meaningless to session 2.
    let cross = bob2.decrypt(&sealed1.header, &sealed1.ciphertext);
    assert!(cross.is_err(), "cross-session decryption must fail");
    // Session 1 itself is unharmed by the failed attempt elsewhere.
    let pt1 = bob1.decrypt(&sealed1.header, &sealed1.ciphertext).unwrap();
    assert_eq!(&*pt1, b"same plaintext");
}
/// RT-542: After a KEM ratchet step heals a compromised state, the
/// attacker's snapshot becomes useless for future messages.
#[test]
fn break_in_recovery_via_kem_ratchet() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Exchange initial messages to establish a working session.
    send_a_to_b(&mut alice, &mut bob, b"setup1");
    send_b_to_a(&mut alice, &mut bob, b"setup2");
    // Attacker captures Bob's state (simulating key compromise).
    // to_bytes consumes self, so Bob is rebuilt from the same snapshot below.
    let compromised_bob_bytes = bob.to_bytes().unwrap().0;
    // Legitimate conversation continues — each direction change triggers a
    // KEM ratchet step that rotates the compromised material out.
    let mut bob = RatchetState::from_bytes(&compromised_bob_bytes).unwrap();
    send_a_to_b(&mut alice, &mut bob, b"heal1");
    send_b_to_a(&mut alice, &mut bob, b"heal2");
    // One more round to push the compromised epoch out of grace period.
    send_a_to_b(&mut alice, &mut bob, b"heal3");
    send_b_to_a(&mut alice, &mut bob, b"heal4");
    // New message after healing ratchet steps.
    let enc_post_heal = alice.encrypt(b"secret after healing").unwrap();
    bob.decrypt(&enc_post_heal.header, &enc_post_heal.ciphertext)
        .unwrap();
    // Attacker's compromised snapshot cannot decrypt the post-heal message.
    let mut attacker_bob = RatchetState::from_bytes(&compromised_bob_bytes).unwrap();
    let result = attacker_bob.decrypt(&enc_post_heal.header, &enc_post_heal.ciphertext);
    assert!(
        result.is_err(),
        "compromised state must not decrypt messages after KEM ratchet healing"
    );
}
// ── Ratchet property tests (RT-546, RT-547, RT-548, RT-557, RT-558) ───
/// RT-546: Replay detection must survive a serialization round-trip.
#[test]
fn replay_rejected_across_serialization_boundary() {
    use soliton::error::Error;
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    let sealed = alice.encrypt(b"unique msg").unwrap();
    // First delivery succeeds.
    bob.decrypt(&sealed.header, &sealed.ciphertext).unwrap();
    // Snapshot and restore Bob.
    let snapshot = bob.to_bytes().unwrap().0;
    let mut restored = RatchetState::from_bytes(&snapshot).unwrap();
    // Second delivery of the same bytes must be flagged as a duplicate.
    let replay = restored.decrypt(&sealed.header, &sealed.ciphertext);
    assert!(
        matches!(replay, Err(Error::DuplicateMessage)),
        "replay after serialization must return DuplicateMessage, got: {:?}",
        replay,
    );
}
/// RT-547: The recv_seen set must survive serialization. Deliver out of
/// order, snapshot, then confirm the gap still decrypts while the
/// already-seen counters are still rejected.
#[test]
fn recv_seen_persists_across_serialization() {
    use soliton::error::Error;
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    let c0 = alice.encrypt(b"msg0").unwrap();
    let c1 = alice.encrypt(b"msg1").unwrap();
    let c2 = alice.encrypt(b"msg2").unwrap();
    // Deliver 0 and 2; counter 1 remains an open gap.
    bob.decrypt(&c0.header, &c0.ciphertext).unwrap();
    bob.decrypt(&c2.header, &c2.ciphertext).unwrap();
    // Snapshot and restore.
    let snapshot = bob.to_bytes().unwrap().0;
    let mut restored = RatchetState::from_bytes(&snapshot).unwrap();
    // The gap is still open after the round-trip.
    let pt1 = restored.decrypt(&c1.header, &c1.ciphertext).unwrap();
    assert_eq!(&*pt1, b"msg1");
    // The two already-consumed counters are still remembered as seen.
    assert!(matches!(
        restored.decrypt(&c0.header, &c0.ciphertext),
        Err(Error::DuplicateMessage)
    ));
    assert!(matches!(
        restored.decrypt(&c2.header, &c2.ciphertext),
        Err(Error::DuplicateMessage)
    ));
}
/// RT-548: Messages from the previous epoch decrypt after one KEM
/// ratchet step (grace period) but fail after two (grace period expired).
#[test]
fn prev_recv_epoch_key_grace_period() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Scenario 1 — one ratchet step: Alice sends a message in epoch 0 that
    // Bob does not receive yet.
    let enc_old = alice.encrypt(b"old epoch msg").unwrap();
    // Trigger one KEM ratchet step: Bob replies → new epoch.
    let enc_b = bob.encrypt(b"trigger ratchet").unwrap();
    alice.decrypt(&enc_b.header, &enc_b.ciphertext).unwrap();
    // Bob has NOT received enc_old yet. One ratchet step has occurred.
    // Grace period: old-epoch messages still decrypt.
    let pt = bob.decrypt(&enc_old.header, &enc_old.ciphertext).unwrap();
    assert_eq!(&*pt, b"old epoch msg");
    // Scenario 2 — a fresh session where TWO ratchet steps happen before
    // the held-back epoch-0 message is delivered.
    let (mut alice2, mut bob2, _, _) = do_kex(true);
    let enc_old2 = alice2.encrypt(b"old epoch 2").unwrap();
    // First ratchet step (full round trip in both directions).
    let enc_b2 = bob2.encrypt(b"ratchet1").unwrap();
    alice2.decrypt(&enc_b2.header, &enc_b2.ciphertext).unwrap();
    let enc_a3 = alice2.encrypt(b"ratchet1 reply").unwrap();
    bob2.decrypt(&enc_a3.header, &enc_a3.ciphertext).unwrap();
    // Second ratchet step.
    let enc_b3 = bob2.encrypt(b"ratchet2").unwrap();
    alice2.decrypt(&enc_b3.header, &enc_b3.ciphertext).unwrap();
    let enc_a4 = alice2.encrypt(b"ratchet2 reply").unwrap();
    bob2.decrypt(&enc_a4.header, &enc_a4.ciphertext).unwrap();
    // Now try to decrypt the epoch-0 message — grace period expired.
    let result = bob2.decrypt(&enc_old2.header, &enc_old2.ciphertext);
    assert!(
        result.is_err(),
        "old epoch message must fail after two ratchet steps (grace period expired), got: {:?}",
        result,
    );
}
/// RT-557: Many messages within one epoch, including out-of-order delivery
/// at different counter values. The exact u32::MAX-1 edge case lives in the
/// unit test `encrypt_at_send_count_max_minus_one_succeeds` (which has
/// direct field access); this test verifies the contract end-to-end.
#[test]
fn high_counter_encrypt_decrypt_roundtrip() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // One-directional burst: the send counter climbs within a single epoch.
    let mut queued: Vec<_> = (0..50u32)
        .map(|i| {
            let msg = format!("msg-{i}");
            (alice.encrypt(msg.as_bytes()).unwrap(), msg)
        })
        .collect();
    // Highest counter arrives first …
    let (last_sealed, last_msg) = queued.pop().unwrap();
    let pt = bob
        .decrypt(&last_sealed.header, &last_sealed.ciphertext)
        .unwrap();
    assert_eq!(&*pt, last_msg.as_bytes());
    // … then the rest in forward order, each via counter-mode key derivation.
    for (sealed, msg) in &queued {
        let pt = bob.decrypt(&sealed.header, &sealed.ciphertext).unwrap();
        assert_eq!(&*pt, msg.as_bytes());
    }
}
/// RT-558: Drive more than three consecutive KEM ratchet steps in both
/// directions, then confirm serialization still round-trips afterwards.
#[test]
fn multi_epoch_stress_test() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    for epoch in 0..8u32 {
        let forward = format!("a→b epoch {epoch}");
        assert_eq!(
            send_a_to_b(&mut alice, &mut bob, forward.as_bytes()),
            forward.as_bytes()
        );
        let backward = format!("b→a epoch {epoch}");
        assert_eq!(
            send_b_to_a(&mut alice, &mut bob, backward.as_bytes()),
            backward.as_bytes()
        );
    }
    // Snapshot and restore both sides after many epochs.
    let a_bytes = alice.to_bytes().unwrap().0;
    let b_bytes = bob.to_bytes().unwrap().0;
    let mut alice = RatchetState::from_bytes(&a_bytes).unwrap();
    let mut bob = RatchetState::from_bytes(&b_bytes).unwrap();
    assert_eq!(
        send_a_to_b(&mut alice, &mut bob, b"post-multi-epoch"),
        b"post-multi-epoch"
    );
}
/// RT-985: Epoch isolation must hold across a serialization boundary.
/// Serialize Bob at epoch N, advance to epoch N+2, deserialize the epoch-N
/// snapshot, and verify it cannot decrypt epoch-N+2 messages.
#[test]
fn epoch_isolation_survives_serialization() {
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Epoch 0: exchange messages, then serialize Bob.
    send_a_to_b(&mut alice, &mut bob, b"epoch0");
    // to_bytes consumes self — save the snapshot, then reload Bob to continue.
    let bob_epoch0_bytes = bob.to_bytes().unwrap().0;
    let mut bob = RatchetState::from_bytes(&bob_epoch0_bytes).unwrap();
    // Advance to epoch 1: Bob sends → Alice receives → Alice sends → Bob receives.
    let enc_b = bob.encrypt(b"trigger ratchet 1").unwrap();
    alice.decrypt(&enc_b.header, &enc_b.ciphertext).unwrap();
    let enc_a = alice.encrypt(b"epoch1 reply").unwrap();
    bob.decrypt(&enc_a.header, &enc_a.ciphertext).unwrap();
    // Advance to epoch 2: Bob sends → Alice receives → Alice sends → Bob receives.
    let enc_b2 = bob.encrypt(b"trigger ratchet 2").unwrap();
    alice.decrypt(&enc_b2.header, &enc_b2.ciphertext).unwrap();
    let enc_a2 = alice.encrypt(b"epoch2 msg").unwrap();
    bob.decrypt(&enc_a2.header, &enc_a2.ciphertext).unwrap();
    // Deserialize the epoch-0 snapshot and try to decrypt the epoch-2 message.
    let mut bob_old = RatchetState::from_bytes(&bob_epoch0_bytes).unwrap();
    let result = bob_old.decrypt(&enc_a2.header, &enc_a2.ciphertext);
    assert!(
        result.is_err(),
        "epoch-0 snapshot must not decrypt epoch-2 message, got: {:?}",
        result,
    );
}
/// RT-990: prev_recv_seen duplicate detection must survive serialization.
/// A previous-epoch message that was already decrypted must be rejected as
/// duplicate after serialize/deserialize.
#[test]
fn prev_recv_seen_survives_serialization() {
    use soliton::error::Error;
    let (mut alice, mut bob, _fp_a, _fp_b) = do_kex(true);
    // Alice sends two messages in epoch 0.
    let enc0 = alice.encrypt(b"prev-epoch msg0").unwrap();
    let enc1 = alice.encrypt(b"prev-epoch msg1").unwrap();
    // Bob decrypts msg0 in epoch 0 (msg1 held back).
    bob.decrypt(&enc0.header, &enc0.ciphertext).unwrap();
    // Trigger Bob's receive-side KEM ratchet: Bob sends → Alice receives →
    // Alice sends with new ratchet_pk → Bob receives (triggers KEM ratchet,
    // moving recv_seen → prev_recv_seen).
    let enc_b = bob.encrypt(b"trigger ratchet").unwrap();
    alice.decrypt(&enc_b.header, &enc_b.ciphertext).unwrap();
    let enc_a_new = alice.encrypt(b"new epoch reply").unwrap();
    bob.decrypt(&enc_a_new.header, &enc_a_new.ciphertext)
        .unwrap();
    // Bob now decrypts msg1 from the previous epoch (grace period).
    // msg1's counter is added to prev_recv_seen.
    bob.decrypt(&enc1.header, &enc1.ciphertext).unwrap();
    // Serialize/deserialize Bob — prev_recv_seen should contain msg0 and msg1.
    let bob_bytes = bob.to_bytes().unwrap().0;
    let mut bob2 = RatchetState::from_bytes(&bob_bytes).unwrap();
    // Replay msg0 from the previous epoch — must be rejected as duplicate.
    let result = bob2.decrypt(&enc0.header, &enc0.ciphertext);
    assert!(
        matches!(result, Err(Error::DuplicateMessage)),
        "prev-epoch replay after serialization must return DuplicateMessage, got: {:?}",
        result,
    );
    // Replay msg1 from the previous epoch — also duplicate.
    let result = bob2.decrypt(&enc1.header, &enc1.ciphertext);
    assert!(
        matches!(result, Err(Error::DuplicateMessage)),
        "prev-epoch replay after serialization must return DuplicateMessage, got: {:?}",
        result,
    );
}

View file

@ -0,0 +1,114 @@
#![allow(deprecated)] // Tests exercise from_bytes directly.
use soliton::primitives::random;
use soliton::primitives::xwing;
use soliton::ratchet::RatchetState;
use soliton::storage::{StorageKey, StorageKeyRing, decrypt_blob, encrypt_blob};
#[test]
fn key_rotation_lifecycle() {
    // Full rotation flow: encrypt under v1, rotate the ring to v2, verify old
    // blobs stay readable until v1 is removed from the ring.
    let v1 = StorageKey::new(1, random::random_array()).unwrap();
    let v2 = StorageKey::new(2, random::random_array()).unwrap();
    let mut ring = StorageKeyRing::new(v1).unwrap();
    // Two blobs written while v1 is active.
    let blob_a = encrypt_blob(
        ring.active_key().unwrap(),
        b"message A",
        "channel-1",
        "seg-0",
        false,
    )
    .unwrap();
    let blob_b = encrypt_blob(
        ring.active_key().unwrap(),
        b"message B",
        "channel-1",
        "seg-1",
        true,
    )
    .unwrap();
    // Rotate: v2 becomes the active key.
    ring.add_key(v2, true).unwrap();
    assert_eq!(ring.active_key().unwrap().version(), 2);
    // One blob written under v2.
    let blob_c = encrypt_blob(
        ring.active_key().unwrap(),
        b"message C",
        "channel-2",
        "seg-0",
        false,
    )
    .unwrap();
    // While both keys are in the ring, every blob must decrypt.
    assert_eq!(
        &*decrypt_blob(&ring, &blob_a, "channel-1", "seg-0").unwrap(),
        b"message A"
    );
    assert_eq!(
        &*decrypt_blob(&ring, &blob_b, "channel-1", "seg-1").unwrap(),
        b"message B"
    );
    assert_eq!(
        &*decrypt_blob(&ring, &blob_c, "channel-2", "seg-0").unwrap(),
        b"message C"
    );
    // Dropping v1 orphans its blobs but leaves v2 blobs intact.
    assert!(ring.remove_key(1).unwrap());
    assert!(decrypt_blob(&ring, &blob_a, "channel-1", "seg-0").is_err());
    assert_eq!(
        &*decrypt_blob(&ring, &blob_c, "channel-2", "seg-0").unwrap(),
        b"message C"
    );
}
#[test]
fn ratchet_state_storage_round_trip() {
    // Serialize a live ratchet state, store it as an encrypted blob, restore
    // it, and verify both byte-level fidelity and continued operation.
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let rk: [u8; 32] = random::random_array();
    let ck: [u8; 32] = random::random_array();
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    let mut alice = RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk.clone(), ek_sk).unwrap();
    let mut bob = RatchetState::init_bob(rk, ck, fp_b, fp_a, ek_pk).unwrap();
    // Build up a little conversation history in both directions.
    let msg = alice.encrypt(b"hello").unwrap();
    bob.decrypt(&msg.header, &msg.ciphertext).unwrap();
    let msg = bob.encrypt(b"world").unwrap();
    alice.decrypt(&msg.header, &msg.ciphertext).unwrap();
    // Snapshot Alice and wrap the snapshot in an encrypted storage blob.
    let snapshot = alice.to_bytes().unwrap().0;
    let storage_key = StorageKey::new(1, random::random_array()).unwrap();
    let blob = encrypt_blob(&storage_key, &snapshot, "session-store", "alice-1", true).unwrap();
    let ring = StorageKeyRing::new(storage_key).unwrap();
    let restored_bytes = decrypt_blob(&ring, &blob, "session-store", "alice-1").unwrap();
    // Byte-level fidelity: everything except the epoch counter (bytes 1..9,
    // which advances on every to_bytes() call) must round-trip unchanged.
    let restored = RatchetState::from_bytes(&restored_bytes).unwrap();
    let resnapshot = restored.to_bytes().unwrap().0;
    assert_eq!(snapshot[0], resnapshot[0], "version must match");
    assert_eq!(
        snapshot[9..],
        resnapshot[9..],
        "ratchet state serialization round-trip must be identical (post-epoch fields)",
    );
    // The epoch must advance by exactly 1 per to_bytes() call.
    let epoch_before = u64::from_be_bytes(snapshot[1..9].try_into().unwrap());
    let epoch_after = u64::from_be_bytes(resnapshot[1..9].try_into().unwrap());
    assert_eq!(epoch_after, epoch_before + 1);
    // Functional check: a freshly restored Alice still talks to Bob.
    let mut restored = RatchetState::from_bytes(&restored_bytes).unwrap();
    let msg = restored.encrypt(b"after storage").unwrap();
    let pt = bob.decrypt(&msg.header, &msg.ciphertext).unwrap();
    assert_eq!(&*pt, b"after storage");
}

View file

@ -0,0 +1,723 @@
//! Zeroization verification tests.
//!
//! Validates that secret key material is actually zeroed in memory after Drop
//! or explicit `.zeroize()` calls. Two techniques:
//!
//! - **`/proc/self/mem`** (Linux): reads process memory via the kernel to
//! inspect heap allocations after Drop without invoking Rust UB. Gated with
//! `#[cfg(all(target_os = "linux", not(miri)))]`.
//!
//! - **`std::ptr::read_volatile`**: reads stack memory after `.zeroize()` or
//! Drop, preventing the compiler from eliding the read. Gated `#[cfg(not(miri))]`.
//!
//! These tests are NOT run under MIRI — MIRI checks memory safety (UB, use-
//! after-free), while these tests check memory *content* (secrecy). MIRI runs
//! ~105 PQ-free tests separately using the `miri` nextest profile
//! (see `.config/nextest.toml`). These zeroization tests are excluded from MIRI.
//!
//! **Key insight:** `std::mem::drop(x)` moves `x` before calling `Drop::drop`,
//! so the *original* memory location retains the secret bytes. All tests use
//! `ManuallyDrop` + `unsafe { ManuallyDrop::drop(&mut md) }` which calls
//! `Drop::drop` in-place without moving.
use soliton::identity::{GeneratedIdentity, generate_identity};
use soliton::primitives::{mlkem, xwing};
use soliton::storage::{StorageKey, StorageKeyRing};
use std::mem::ManuallyDrop;
// ──────────────────────────────────────────────────────────────────────
// Helper: read process memory via /proc/self/mem (no UB).
// ──────────────────────────────────────────────────────────────────────
/// Reads `len` bytes of this process's own address space starting at `ptr`
/// by going through the kernel (`/proc/self/mem`). Because the copy is made
/// by the kernel, not by a Rust-level dereference, this can safely inspect
/// memory that has already been dropped or freed.
#[cfg(target_os = "linux")]
fn read_process_bytes(ptr: *const u8, len: usize) -> Vec<u8> {
    use std::io::{Read, Seek, SeekFrom};
    let mut mem = std::fs::File::open("/proc/self/mem").unwrap();
    mem.seek(SeekFrom::Start(ptr as u64)).unwrap();
    let mut out = vec![0u8; len];
    mem.read_exact(&mut out).unwrap();
    out
}
/// Holds an already-open `/proc/self/mem` handle plus a reusable scratch
/// buffer so that post-drop memory inspection performs **zero** heap
/// allocations. Without this, the allocator could hand the just-freed region
/// to the read buffer or the `File`'s internals and clobber the zeroed bytes
/// before we get to look at them.
#[cfg(target_os = "linux")]
struct ProcMemReader {
    file: std::fs::File,
    buf: Vec<u8>,
}
#[cfg(target_os = "linux")]
impl ProcMemReader {
    /// Opens `/proc/self/mem` and allocates a `len`-byte scratch buffer.
    fn new(len: usize) -> Self {
        let file = std::fs::File::open("/proc/self/mem").unwrap();
        let buf = vec![0u8; len];
        Self { file, buf }
    }
    /// Reads `buf.len()` bytes starting at `ptr`, allocating nothing.
    fn read_at(&mut self, ptr: *const u8) -> &[u8] {
        use std::io::{Read, Seek, SeekFrom};
        self.file.seek(SeekFrom::Start(ptr as u64)).unwrap();
        self.file.read_exact(&mut self.buf).unwrap();
        &self.buf
    }
}
/// Asserts that a freed heap allocation is zeroed, tolerating glibc freelist
/// metadata: after free(), glibc writes fd/bk pointers, safe-linking XOR'd
/// pointers, and fd_nextsize/bk_nextsize (large bins) into the head of the
/// chunk. The first 64 bytes are therefore ignored; everything past that
/// must be zero. Panics with `label` and the first offending offset on failure.
#[cfg(target_os = "linux")]
fn assert_heap_zeroed_with(reader: &mut ProcMemReader, ptr: *const u8, len: usize, label: &str) {
    let snapshot = reader.read_at(ptr);
    let skip = len.min(64);
    let tail = &snapshot[skip..];
    assert!(
        tail.iter().all(|&b| b == 0),
        "{label}: found non-zero bytes after skip={skip} in {len}-byte allocation.\n\
         First non-zero at offset {}: 0x{:02x}",
        skip + tail.iter().position(|&b| b != 0).unwrap_or(0),
        tail.iter().find(|&&b| b != 0).unwrap_or(&0),
    );
}
/// One-shot variant of [`assert_heap_zeroed_with`] that opens its own reader.
/// Prefer the pre-allocated `ProcMemReader` form for post-drop checks: this
/// function allocates (a `File` and a `Vec`) before reading, which can land
/// on the just-freed region and cause flaky results.
#[cfg(target_os = "linux")]
fn assert_heap_zeroed(ptr: *const u8, len: usize, label: &str) {
    let snapshot = read_process_bytes(ptr, len);
    let skip = len.min(64);
    let tail = &snapshot[skip..];
    assert!(
        tail.iter().all(|&b| b == 0),
        "{label}: found non-zero bytes after skip={skip} in {len}-byte allocation.\n\
         First non-zero at offset {}: 0x{:02x}",
        skip + tail.iter().position(|&b| b != 0).unwrap_or(0),
        tail.iter().find(|&&b| b != 0).unwrap_or(&0),
    );
}
// ──────────────────────────────────────────────────────────────────────
// Phase B: Heap zeroization — ZeroizeOnDrop types wrapping Vec<u8>
// ──────────────────────────────────────────────────────────────────────
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn xwing_sk_zeroized_on_drop() {
    let (_, sk) = xwing::keygen().unwrap();
    let mut holder = ManuallyDrop::new(sk);
    let ptr = holder.as_bytes().as_ptr();
    let len = holder.as_bytes().len();
    assert_eq!(len, 2432, "unexpected xwing SK size");
    // Open the /proc/self/mem reader up front: the post-drop read must not
    // allocate, or the freed region could be recycled before we inspect it.
    let mut reader = ProcMemReader::new(len);
    assert!(
        read_process_bytes(ptr, len).iter().any(|&b| b != 0),
        "xwing SK was all zeros before drop"
    );
    // In-place drop (no move): Drop::drop runs, then the Vec deallocates.
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_heap_zeroed_with(&mut reader, ptr, len, "xwing::SecretKey");
}
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn identity_sk_zeroized_on_drop() {
    let GeneratedIdentity { secret_key: sk, .. } = generate_identity().unwrap();
    let mut holder = ManuallyDrop::new(sk);
    let ptr = holder.as_bytes().as_ptr();
    let len = holder.as_bytes().len();
    assert_eq!(len, 2496, "unexpected identity SK size");
    // Reader is opened before the drop so the post-drop read allocates nothing.
    let mut reader = ProcMemReader::new(len);
    assert!(
        read_process_bytes(ptr, len).iter().any(|&b| b != 0),
        "identity SK was all zeros before drop"
    );
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_heap_zeroed_with(&mut reader, ptr, len, "IdentitySecretKey");
}
#[test]
#[cfg(not(miri))]
fn xwing_shared_secret_zeroized_on_drop() {
    let (pk, _) = xwing::keygen().unwrap();
    let (_, ss) = xwing::encapsulate(&pk).unwrap();
    // SharedSecret wraps a stack [u8; 32]; inspect it with read_volatile
    // rather than /proc/self/mem (which is for heap allocations).
    let mut holder = ManuallyDrop::new(ss);
    let ptr = holder.as_bytes().as_ptr() as *const [u8; 32];
    let before = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        before.iter().any(|&b| b != 0),
        "xwing SS was all zeros before drop"
    );
    unsafe { ManuallyDrop::drop(&mut holder) };
    let after = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(
        after, [0u8; 32],
        "xwing::SharedSecret not fully zeroized after drop"
    );
}
#[test]
#[cfg(not(miri))]
fn mlkem_shared_secret_zeroized_on_drop() {
    let (pk, _) = mlkem::keygen().unwrap();
    let (_, ss) = mlkem::encapsulate(&pk).unwrap();
    // SharedSecret wraps a stack [u8; 32]; inspect it with read_volatile
    // rather than /proc/self/mem (which is for heap allocations).
    let mut holder = ManuallyDrop::new(ss);
    let ptr = holder.as_bytes().as_ptr() as *const [u8; 32];
    let before = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        before.iter().any(|&b| b != 0),
        "mlkem SS was all zeros before drop"
    );
    unsafe { ManuallyDrop::drop(&mut holder) };
    let after = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(
        after, [0u8; 32],
        "mlkem::SharedSecret not fully zeroized after drop"
    );
}
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn storage_keyring_zeroized_on_drop() {
    use soliton::primitives::random;
    // Two random keys in one ring so both HashMap-resident key slots get checked.
    let key1 = StorageKey::new(1, random::random_array()).unwrap();
    let key2 = StorageKey::new(2, random::random_array()).unwrap();
    let mut ring = StorageKeyRing::new(key1).unwrap();
    ring.add_key(key2, true).unwrap();
    // Capture pointers to individual key fields inside the HashMap's backing allocation.
    let k1_ptr = ring.get_key(1).unwrap().key().as_ptr();
    let k2_ptr = ring.get_key(2).unwrap().key().as_ptr();
    // Sanity: the random keys must be non-zero, or the post-drop check is vacuous.
    let before1 = read_process_bytes(k1_ptr, 32);
    let before2 = read_process_bytes(k2_ptr, 32);
    assert!(
        before1.iter().any(|&b| b != 0),
        "key1 was all zeros before drop"
    );
    assert!(
        before2.iter().any(|&b| b != 0),
        "key2 was all zeros before drop"
    );
    // Use ManuallyDrop to call Drop::drop in-place. StorageKeyRing::Drop
    // explicitly zeroizes each StorageKey (ZeroizeOnDrop fires on each entry's
    // fields). After HashMap deallocation, allocator metadata may overwrite
    // parts of the backing allocation, but key fields must have been zeroed
    // before dealloc.
    let mut md = ManuallyDrop::new(ring);
    unsafe { ManuallyDrop::drop(&mut md) };
    // KNOWN LIMITATION: OR assertion (not AND) — passes if *either* key slot
    // is zeroed. HashMap bucket layout is hash-dependent; one slot may coincide
    // with the region glibc overwrites with freelist metadata (fd/bk pointers,
    // safe-linking XOR'd pointers) after free(). If allocator behavior changes
    // such that both slots land in the metadata region, this test passes
    // vacuously without verifying actual zeroization. A custom allocator or
    // jemalloc-based test would close this gap but is out of scope.
    let after1 = read_process_bytes(k1_ptr, 32);
    let after2 = read_process_bytes(k2_ptr, 32);
    let k1_zero = after1.iter().all(|&b| b == 0);
    let k2_zero = after2.iter().all(|&b| b == 0);
    assert!(
        k1_zero || k2_zero,
        "StorageKeyRing: neither key was fully zeroized after drop.\n\
        key1 residue: {:?}\nkey2 residue: {:?}",
        &after1[..8],
        &after2[..8],
    );
}
// ──────────────────────────────────────────────────────────────────────
// Phase C: Stack/field zeroization — protocol types with manual Drop
//
// All use ManuallyDrop to ensure Drop::drop runs in-place (no move).
// ──────────────────────────────────────────────────────────────────────
#[test]
#[cfg(not(miri))]
fn call_keys_drop_zeroizes() {
    use soliton::call::derive_call_keys;
    // Derive call keys from fixed, deterministic inputs.
    let rk = [0x01u8; 32];
    let ss = [0x02u8; 32];
    let call_id = [0x03u8; 16];
    let fp_lo = [0x00u8; 32];
    let fp_hi = [0xFFu8; 32];
    let keys = derive_call_keys(&rk, &ss, &call_id, &fp_lo, &fp_hi).unwrap();
    let mut holder = ManuallyDrop::new(keys);
    let send_ptr = holder.send_key().as_ptr() as *const [u8; 32];
    let recv_ptr = holder.recv_key().as_ptr() as *const [u8; 32];
    // Both keys must be non-zero before the drop for the check to mean anything.
    assert_ne!(
        unsafe { std::ptr::read_volatile(send_ptr) },
        [0u8; 32],
        "send_key was zero before drop"
    );
    assert_ne!(
        unsafe { std::ptr::read_volatile(recv_ptr) },
        [0u8; 32],
        "recv_key was zero before drop"
    );
    // In-place drop — derived ZeroizeOnDrop must wipe every field.
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_eq!(
        unsafe { std::ptr::read_volatile(send_ptr) },
        [0u8; 32],
        "CallKeys::send_key not zeroized after drop"
    );
    assert_eq!(
        unsafe { std::ptr::read_volatile(recv_ptr) },
        [0u8; 32],
        "CallKeys::recv_key not zeroized after drop"
    );
}
#[test]
#[cfg(not(miri))]
fn call_keys_drop_after_advance_zeroizes() {
    // Checks that ZeroizeOnDrop still fires on keys that have gone through
    // advance(). Note: the in-advance zeroization of the old key (call.rs
    // calls self.send_key.zeroize() before overwriting it) cannot be observed
    // from outside the struct — the field is immediately overwritten — so
    // that defense-in-depth step must be verified by code review, not here.
    use soliton::call::derive_call_keys;
    let rk = [0x01u8; 32];
    let ss = [0x02u8; 32];
    let call_id = [0x03u8; 16];
    let fp_lo = [0x00u8; 32];
    let fp_hi = [0xFFu8; 32];
    let mut keys = derive_call_keys(&rk, &ss, &call_id, &fp_lo, &fp_hi).unwrap();
    let prior_send = *keys.send_key();
    let prior_recv = *keys.recv_key();
    keys.advance().unwrap();
    // advance() must rotate both directions to fresh keys.
    assert_ne!(keys.send_key(), &prior_send, "advance didn't change send_key");
    assert_ne!(keys.recv_key(), &prior_recv, "advance didn't change recv_key");
    // The final in-place drop must wipe the post-advance keys.
    let mut holder = ManuallyDrop::new(keys);
    let send_ptr = holder.send_key().as_ptr() as *const [u8; 32];
    let recv_ptr = holder.recv_key().as_ptr() as *const [u8; 32];
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_eq!(
        unsafe { std::ptr::read_volatile(send_ptr) },
        [0u8; 32],
        "CallKeys::send_key not zeroized after advance+drop"
    );
    assert_eq!(
        unsafe { std::ptr::read_volatile(recv_ptr) },
        [0u8; 32],
        "CallKeys::recv_key not zeroized after advance+drop"
    );
}
#[test]
#[cfg(not(miri))]
fn stream_encryptor_drop_zeroizes() {
    use soliton::streaming::stream_encrypt_init;
    let key = [0x42u8; 32];
    let encryptor = stream_encrypt_init(&key, b"", false).unwrap();
    let mut holder = ManuallyDrop::new(encryptor);
    #[allow(deprecated)]
    let key_ptr = holder.key_ptr();
    // The stored key must match what we passed in before the drop.
    let before = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(before, [0x42u8; 32], "encryptor key wrong before drop");
    unsafe { ManuallyDrop::drop(&mut holder) };
    let after = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(
        after, [0u8; 32],
        "StreamEncryptor::key not zeroized after drop"
    );
}
#[test]
#[cfg(not(miri))]
fn stream_decryptor_drop_zeroizes() {
    use soliton::streaming::{stream_decrypt_init, stream_encrypt_init};
    let key = [0x42u8; 32];
    // A decryptor needs a valid stream header, so run the encrypt init first.
    let encryptor = stream_encrypt_init(&key, b"", false).unwrap();
    let header = encryptor.header();
    let decryptor = stream_decrypt_init(&key, &header, b"").unwrap();
    let mut holder = ManuallyDrop::new(decryptor);
    #[allow(deprecated)]
    let key_ptr = holder.key_ptr();
    let before = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(before, [0x42u8; 32], "decryptor key wrong before drop");
    unsafe { ManuallyDrop::drop(&mut holder) };
    let after = unsafe { std::ptr::read_volatile(key_ptr as *const [u8; 32]) };
    assert_eq!(
        after, [0u8; 32],
        "StreamDecryptor::key not zeroized after drop"
    );
}
#[test]
#[cfg(not(miri))]
fn initiated_session_drop_zeroizes() {
    use soliton::constants;
    use soliton::kex::{PreKeyBundle, initiate_session, sign_prekey, verify_bundle};
    // Set up two identities: Alice initiates toward Bob's prekey bundle.
    let GeneratedIdentity {
        public_key: alice_pk,
        secret_key: alice_sk,
        ..
    } = generate_identity().unwrap();
    let GeneratedIdentity {
        public_key: bob_pk,
        secret_key: bob_sk,
        ..
    } = generate_identity().unwrap();
    // Bob publishes a signed prekey bundle; Alice verifies it and initiates,
    // which produces the InitiatedSession under test.
    let (spk_pk, _spk_sk) = xwing::keygen().unwrap();
    let spk_sig = sign_prekey(&bob_sk, &spk_pk).unwrap();
    let bundle = PreKeyBundle {
        ik_pub: bob_pk.clone(),
        crypto_version: constants::CRYPTO_VERSION.to_string(),
        spk_pub: spk_pk,
        spk_id: 1,
        spk_sig,
        opk_pub: None,
        opk_id: None,
    };
    let vb = verify_bundle(bundle, &bob_pk).unwrap();
    let initiated = initiate_session(&alice_pk, &alice_sk, &vb).unwrap();
    // Drop the session in-place (ManuallyDrop avoids the move that
    // std::mem::drop would perform) and verify both keys are wiped.
    let mut md = ManuallyDrop::new(initiated);
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let ck_ptr = md.initial_chain_key_ptr();
    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    let ck_before = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert!(
        ck_before.iter().any(|&b| b != 0),
        "initial_chain_key was zero before drop"
    );
    unsafe { ManuallyDrop::drop(&mut md) };
    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let ck_after = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "InitiatedSession::root_key not zeroized after drop"
    );
    assert_eq!(
        ck_after, [0u8; 32],
        "InitiatedSession::initial_chain_key not zeroized after drop"
    );
}
#[test]
#[cfg(not(miri))]
fn received_session_drop_zeroizes() {
    use soliton::constants;
    use soliton::kex::{
        PreKeyBundle, initiate_session, receive_session, sign_prekey, verify_bundle,
    };
    // Two identities: Alice initiates, Bob receives.
    let GeneratedIdentity {
        public_key: alice_pk,
        secret_key: alice_sk,
        ..
    } = generate_identity().unwrap();
    let GeneratedIdentity {
        public_key: bob_pk,
        secret_key: bob_sk,
        ..
    } = generate_identity().unwrap();
    // Bob's signed prekey bundle, verified and then used by Alice to initiate.
    let (spk_pk, spk_sk) = xwing::keygen().unwrap();
    let spk_sig = sign_prekey(&bob_sk, &spk_pk).unwrap();
    let bundle = PreKeyBundle {
        ik_pub: bob_pk.clone(),
        crypto_version: constants::CRYPTO_VERSION.to_string(),
        spk_pub: spk_pk,
        spk_id: 1,
        spk_sig,
        opk_pub: None,
        opk_id: None,
    };
    let vb = verify_bundle(bundle, &bob_pk).unwrap();
    let initiated = initiate_session(&alice_pk, &alice_sk, &vb).unwrap();
    // Bob completes the handshake, producing the ReceivedSession under test.
    let received = receive_session(
        &bob_pk,
        &bob_sk,
        &alice_pk,
        &initiated.session_init,
        &initiated.sender_sig,
        &spk_sk,
        None,
    )
    .unwrap();
    // Drop in-place (no move) and verify both keys are wiped.
    let mut md = ManuallyDrop::new(received);
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let ck_ptr = md.initial_chain_key_ptr();
    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    let ck_before = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert!(
        ck_before.iter().any(|&b| b != 0),
        "initial_chain_key was zero before drop"
    );
    unsafe { ManuallyDrop::drop(&mut md) };
    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let ck_after = unsafe { std::ptr::read_volatile(ck_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "ReceivedSession::root_key not zeroized after drop"
    );
    assert_eq!(
        ck_after, [0u8; 32],
        "ReceivedSession::initial_chain_key not zeroized after drop"
    );
}
// ──────────────────────────────────────────────────────────────────────
// Phase D: RatchetState zeroization — memory content + observable behavior
// ──────────────────────────────────────────────────────────────────────
#[test]
#[cfg(not(miri))]
fn ratchet_state_drop_zeroizes_key_material() {
    // Memory-content verification for RatchetState — the most security-critical
    // type (holds root_key, send_epoch_key, recv_epoch_key). Uses the same
    // ManuallyDrop + read_volatile pattern as other secret-key types to verify
    // that Drop::drop (which delegates to reset()) actually zeros in-place.
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let rk = [0x11u8; 32];
    let ck = [0x22u8; 32];
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    let alice =
        soliton::ratchet::RatchetState::init_alice(rk, ck, fp_a, fp_b, ek_pk, ek_sk).unwrap();
    let mut md = ManuallyDrop::new(alice);
    // The *_ptr accessors are deprecated, hence the local allows.
    #[allow(deprecated)]
    let rk_ptr = md.root_key_ptr();
    #[allow(deprecated)]
    let sek_ptr = md.send_epoch_key_ptr();
    #[allow(deprecated)]
    let rek_ptr = md.recv_epoch_key_ptr();
    // Confirm non-zero before drop.
    let rk_before = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let sek_before = unsafe { std::ptr::read_volatile(sek_ptr as *const [u8; 32]) };
    assert!(
        rk_before.iter().any(|&b| b != 0),
        "root_key was zero before drop"
    );
    // send_epoch_key is the epoch_key passed to init_alice (non-zero).
    assert!(
        sek_before.iter().any(|&b| b != 0),
        "send_epoch_key was zero before drop"
    );
    // recv_epoch_key is initialized to [0u8; 32] for Alice (Bob hasn't sent yet),
    // so skip the non-zero pre-check for it — verify zeroization nonetheless.
    unsafe { ManuallyDrop::drop(&mut md) };
    let rk_after = unsafe { std::ptr::read_volatile(rk_ptr as *const [u8; 32]) };
    let sek_after = unsafe { std::ptr::read_volatile(sek_ptr as *const [u8; 32]) };
    let rek_after = unsafe { std::ptr::read_volatile(rek_ptr as *const [u8; 32]) };
    assert_eq!(
        rk_after, [0u8; 32],
        "RatchetState::root_key not zeroized after drop"
    );
    assert_eq!(
        sek_after, [0u8; 32],
        "RatchetState::send_epoch_key not zeroized after drop"
    );
    assert_eq!(
        rek_after, [0u8; 32],
        "RatchetState::recv_epoch_key not zeroized after drop"
    );
}
/// Builds a matched Alice/Bob ratchet pair sharing freshly-random root and
/// chain keys; returns (alice, bob, fingerprint_a, fingerprint_b).
fn make_ratchet_pair() -> (
    soliton::ratchet::RatchetState,
    soliton::ratchet::RatchetState,
    [u8; 32],
    [u8; 32],
) {
    use soliton::primitives::random;
    let (ek_pk, ek_sk) = xwing::keygen().unwrap();
    let root_key: [u8; 32] = random::random_array();
    let chain_key: [u8; 32] = random::random_array();
    let fp_a = [0xAAu8; 32];
    let fp_b = [0xBBu8; 32];
    let alice = soliton::ratchet::RatchetState::init_alice(
        root_key,
        chain_key,
        fp_a,
        fp_b,
        ek_pk.clone(),
        ek_sk,
    )
    .unwrap();
    let bob =
        soliton::ratchet::RatchetState::init_bob(root_key, chain_key, fp_b, fp_a, ek_pk).unwrap();
    (alice, bob, fp_a, fp_b)
}
#[test]
fn ratchet_reset_then_encrypt_fails() {
    // After reset() the key material is gone, so encryption must start failing.
    let (mut state, _, _fp_a, _fp_b) = make_ratchet_pair();
    assert!(state.encrypt(b"test").is_ok());
    state.reset();
    assert!(state.encrypt(b"test").is_err());
}
#[test]
fn ratchet_recv_seen_cleared_on_reset() {
    let (mut alice, mut bob, _fp_a, _fp_b) = make_ratchet_pair();
    // Deliver msg2 out of order (msg0/msg1 never arrive) so recv_seen is
    // non-empty when reset() runs.
    let _ = alice.encrypt(b"msg0").unwrap();
    let _ = alice.encrypt(b"msg1").unwrap();
    let third = alice.encrypt(b"msg2").unwrap();
    bob.decrypt(&third.header, &third.ciphertext).unwrap();
    bob.reset();
    // reset() clears recv_seen and every epoch key; with the root key zeroed
    // the state is unusable.
    assert!(bob.encrypt(b"test").is_err());
}
#[test]
fn ratchet_aead_failure_no_state_leak() {
    let (mut alice, mut bob, _fp_a, _fp_b) = make_ratchet_pair();
    let enc = alice.encrypt(b"good message").unwrap();
    // Flip one ciphertext byte — AEAD verification must reject it...
    let mut tampered = enc.ciphertext.clone();
    tampered[0] ^= 0xFF;
    assert!(bob.decrypt(&enc.header, &tampered).is_err());
    // ...and Bob's state must roll back so the genuine ciphertext still decrypts.
    let pt = bob.decrypt(&enc.header, &enc.ciphertext).unwrap();
    assert_eq!(&*pt, b"good message");
}
// ──────────────────────────────────────────────────────────────────────
// Phase E: Foundational sanity checks
// ──────────────────────────────────────────────────────────────────────
#[test]
#[cfg(not(miri))]
fn zeroizing_array_drop_zeros() {
    // Canary for the `zeroize` dependency itself: if a future version stops
    // zeroing on Drop (optimizer regression, feature-gate change, etc.), this
    // fails before any higher-level zeroization test can be affected. Every
    // other test in this file assumes Zeroizing works.
    use zeroize::Zeroizing;
    // ManuallyDrop keeps the value in place — std::mem::drop would move it.
    let mut holder = ManuallyDrop::new(Zeroizing::new([0xAAu8; 32]));
    let ptr = holder.as_ptr() as *const [u8; 32];
    assert_eq!(unsafe { std::ptr::read_volatile(ptr) }, [0xAAu8; 32]);
    // In-place drop — Zeroizing's Drop zeros the inner array.
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_eq!(
        unsafe { std::ptr::read_volatile(ptr) },
        [0u8; 32],
        "Zeroizing<[u8; 32]> not zeroed after drop"
    );
}
#[test]
#[cfg(all(target_os = "linux", not(miri)))]
fn zeroizing_vec_drop_zeros() {
    // Verifies that Zeroizing<Vec<u8>> wipes its heap buffer on Drop.
    use zeroize::Zeroizing;
    let secret = Zeroizing::new(vec![0xBBu8; 256]);
    let mut md = ManuallyDrop::new(secret);
    let ptr = md.as_ptr();
    let len = md.len();
    // FIX: pre-allocate the reader BEFORE the drop. The previous version used
    // the allocating `assert_heap_zeroed` wrapper after the drop, which opens
    // a File and allocates a Vec — the allocator could recycle the just-freed
    // region for those and destroy the evidence (the same flake the other
    // heap tests avoid via ProcMemReader).
    let mut reader = ProcMemReader::new(len);
    let before = read_process_bytes(ptr, len);
    assert!(before.iter().all(|&b| b == 0xBB));
    // In-place drop: Zeroizing zeros the buffer, then Vec deallocates.
    unsafe { ManuallyDrop::drop(&mut md) };
    assert_heap_zeroed_with(&mut reader, ptr, len, "Zeroizing<Vec<u8>>");
}
#[test]
#[cfg(not(miri))]
fn storage_key_zeroized_on_drop() {
    use soliton::primitives::random;
    let key = StorageKey::new(1, random::random_array()).unwrap();
    let mut holder = ManuallyDrop::new(key);
    let ptr = holder.key().as_ptr() as *const [u8; 32];
    // Sanity: a random key must not already be all zeros.
    let before = unsafe { std::ptr::read_volatile(ptr) };
    assert!(
        before.iter().any(|&b| b != 0),
        "StorageKey was zero before drop"
    );
    unsafe { ManuallyDrop::drop(&mut holder) };
    let after = unsafe { std::ptr::read_volatile(ptr) };
    assert_eq!(after, [0u8; 32], "StorageKey::key not zeroized after drop");
}
// ──────────────────────────────────────────────────────────────────────
// Phase F: Copy-gap pattern validation
// ──────────────────────────────────────────────────────────────────────
#[test]
#[cfg(not(miri))]
fn copy_gap_pattern_validated() {
    use zeroize::Zeroize;
    // Walk through the Copy-gap hazard and its mitigation, step by step.
    // Step 1: a secret living in a plain array.
    let mut original = [0xCCu8; 32];
    let original_ptr = original.as_ptr() as *const [u8; 32];
    // Step 2: moving it into Zeroizing *copies* it ([u8; 32] is Copy) — the
    // source array still holds the secret. That residue is the "copy gap".
    let wrapper = zeroize::Zeroizing::new(original);
    assert_eq!(
        unsafe { std::ptr::read_volatile(original_ptr) },
        [0xCCu8; 32],
        "original should still hold secret after copy into Zeroizing",
    );
    // Step 3: the mitigation — explicitly zeroize the source copy, exactly
    // as the codebase does.
    original.zeroize();
    assert_eq!(
        unsafe { std::ptr::read_volatile(original_ptr) },
        [0u8; 32],
        "original not zeroized after explicit .zeroize()",
    );
    // Step 4: the wrapper's copy is untouched by the source zeroize.
    assert_eq!(*wrapper, [0xCCu8; 32]);
    // Step 5: dropping the wrapper in place (no move) zeros its copy too.
    let mut holder = ManuallyDrop::new(wrapper);
    let wrapper_ptr = holder.as_ptr() as *const [u8; 32];
    unsafe { ManuallyDrop::drop(&mut holder) };
    assert_eq!(
        unsafe { std::ptr::read_volatile(wrapper_ptr) },
        [0u8; 32],
        "Zeroizing wrapper not zeroed after drop"
    );
}