diff --git a/bin/testapp/src/lib.rs b/bin/testapp/src/lib.rs index 3fdf229..88900d9 100644 --- a/bin/testapp/src/lib.rs +++ b/bin/testapp/src/lib.rs @@ -418,7 +418,8 @@ mod tests { token_account_id: AccountId, account_id: AccountId, ) -> u128 { - let mut key = token_account_id.as_bytes().to_vec(); + let mut key = vec![evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&token_account_id.as_bytes()); key.push(1u8); key.extend(account_id.encode().expect("encode account id")); diff --git a/bin/testapp/src/main.rs b/bin/testapp/src/main.rs index 1ff196d..ba29232 100644 --- a/bin/testapp/src/main.rs +++ b/bin/testapp/src/main.rs @@ -315,7 +315,8 @@ mod tests { token_account_id: AccountId, account_id: AccountId, ) -> u128 { - let mut key = token_account_id.as_bytes().to_vec(); + let mut key = vec![evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&token_account_id.as_bytes()); key.push(1u8); // Token::balances storage prefix key.extend(account_id.encode().expect("encode account id")); diff --git a/bin/testapp/tests/mempool_e2e.rs b/bin/testapp/tests/mempool_e2e.rs index 498d3c8..57617d6 100644 --- a/bin/testapp/tests/mempool_e2e.rs +++ b/bin/testapp/tests/mempool_e2e.rs @@ -275,18 +275,21 @@ impl AsyncMockStorage { /// Initialize an EthEoaAccount's storage (nonce and eth_address). 
fn init_eth_eoa_storage(&self, account_id: AccountId, eth_address: [u8; 20]) { - // Storage keys are: account_id + prefix (u8) + use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; + // Storage keys are: ACCOUNT_STORAGE_PREFIX + account_id + prefix (u8) // Item::new(0) = nonce, Item::new(1) = eth_address let mut data = self.data.write().unwrap(); - let mut nonce_key = account_id.as_bytes().to_vec(); + let mut nonce_key = vec![ACCOUNT_STORAGE_PREFIX]; + nonce_key.extend_from_slice(&account_id.as_bytes()); nonce_key.push(0u8); data.insert( nonce_key, Message::new(&0u64).unwrap().into_bytes().unwrap(), ); - let mut addr_key = account_id.as_bytes().to_vec(); + let mut addr_key = vec![ACCOUNT_STORAGE_PREFIX]; + addr_key.extend_from_slice(&account_id.as_bytes()); addr_key.push(1u8); data.insert( addr_key, @@ -296,18 +299,21 @@ impl AsyncMockStorage { /// Initialize an Ed25519AuthAccount's storage (nonce and public key). fn init_ed25519_auth_storage(&self, account_id: AccountId, public_key: [u8; 32]) { - // Storage keys are: account_id + prefix (u8) + use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; + // Storage keys are: ACCOUNT_STORAGE_PREFIX + account_id + prefix (u8) // Item::new(0) = nonce, Item::new(1) = public key let mut data = self.data.write().unwrap(); - let mut nonce_key = account_id.as_bytes().to_vec(); + let mut nonce_key = vec![ACCOUNT_STORAGE_PREFIX]; + nonce_key.extend_from_slice(&account_id.as_bytes()); nonce_key.push(0u8); data.insert( nonce_key, Message::new(&0u64).unwrap().into_bytes().unwrap(), ); - let mut pubkey_key = account_id.as_bytes().to_vec(); + let mut pubkey_key = vec![ACCOUNT_STORAGE_PREFIX]; + pubkey_key.extend_from_slice(&account_id.as_bytes()); pubkey_key.push(1u8); data.insert( pubkey_key, @@ -317,7 +323,9 @@ impl AsyncMockStorage { /// Set token balance directly in storage for a specific account. 
fn set_token_balance(&self, token_account_id: AccountId, account_id: AccountId, balance: u128) { - let mut key = token_account_id.as_bytes().to_vec(); + use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&token_account_id.as_bytes()); key.push(1u8); // Token::balances storage prefix key.extend(account_id.encode().expect("encode account id")); let value = Message::new(&balance).unwrap().into_bytes().unwrap(); @@ -417,9 +425,11 @@ fn create_signed_tx( } fn read_nonce(storage: &S, account_id: AccountId) -> u64 { + use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; use evolve_core::Message; - let mut nonce_key = account_id.as_bytes().to_vec(); + let mut nonce_key = vec![ACCOUNT_STORAGE_PREFIX]; + nonce_key.extend_from_slice(&account_id.as_bytes()); nonce_key.push(0u8); match storage.get(&nonce_key).expect("read nonce") { Some(value) => Message::from_bytes(value) @@ -437,7 +447,8 @@ fn read_token_balance( use evolve_core::encoding::Encodable; use evolve_core::Message; - let mut key = token_account_id.as_bytes().to_vec(); + let mut key = vec![evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&token_account_id.as_bytes()); key.push(1u8); // Token::balances storage prefix key.extend(account_id.encode().expect("encode account id")); diff --git a/bin/txload/src/main.rs b/bin/txload/src/main.rs index 096e82f..dfcebf5 100644 --- a/bin/txload/src/main.rs +++ b/bin/txload/src/main.rs @@ -26,8 +26,8 @@ struct Args { #[arg(long, default_value = "http://127.0.0.1:8545")] rpc_url: String, - /// Chain ID used for signing EIP-1559 transactions - #[arg(long, default_value_t = 1)] + /// Chain ID used for signing EIP-1559 transactions (default matches Evolve's DEFAULT_CHAIN_ID) + #[arg(long, default_value_t = 900_901)] chain_id: u64, /// Token account Ethereum address (0x...) 
that exposes `transfer` diff --git a/crates/app/node/src/config.rs b/crates/app/node/src/config.rs index a8ab105..b4b7f58 100644 --- a/crates/app/node/src/config.rs +++ b/crates/app/node/src/config.rs @@ -124,7 +124,8 @@ pub struct ChainConfig { impl Default for ChainConfig { fn default() -> Self { - Self { chain_id: 1 } + // 900_901 is deliberately not a live EVM network to prevent cross-chain replay. + Self { chain_id: 900_901 } } } @@ -227,7 +228,7 @@ mod tests { .extract() .expect("figment extract failed"); - assert_eq!(loaded.chain.chain_id, 1); + assert_eq!(loaded.chain.chain_id, 900_901); assert_eq!(loaded.storage.path, DEFAULT_DATA_DIR); assert_eq!(loaded.rpc.http_addr, DEFAULT_RPC_ADDR); assert_eq!(loaded.grpc.addr, DEFAULT_GRPC_ADDR); diff --git a/crates/app/node/src/lib.rs b/crates/app/node/src/lib.rs index 945ad99..17dc99f 100644 --- a/crates/app/node/src/lib.rs +++ b/crates/app/node/src/lib.rs @@ -223,6 +223,8 @@ type RuntimeContext = TokioContext; /// subsystem — all produced blocks must be persisted. async fn build_block_archive(context: TokioContext) -> OnBlockArchive { let config = BlockStorageConfig::default(); + let retention = config.retention_blocks; + let prune_interval = config.blocks_per_section; let store = BlockStorage::new(context, config) .await .expect("failed to initialize block archive storage"); @@ -236,10 +238,18 @@ async fn build_block_archive(context: TokioContext) -> OnBlockArchive { if let Err(e) = store.put_sync(block_number, block_hash, block_bytes).await { tracing::warn!("Failed to archive block {}: {:?}", block_number, e); } + + // Prune old blocks at section boundaries to bound disk usage. 
+ if retention > 0 && block_number > retention && block_number % prune_interval == 0 { + let min_block = block_number.saturating_sub(retention); + if let Err(e) = store.prune(min_block).await { + tracing::warn!(min_block, "Failed to prune block archive: {:?}", e); + } + } } }); - tracing::info!("Block archive storage enabled"); + tracing::info!(retention, "Block archive storage enabled"); Arc::new(move |block_number, block_hash, block_bytes| { let hash_bytes = ArchiveBlockHash::new(block_hash.0); diff --git a/crates/app/sdk/collections/Cargo.toml b/crates/app/sdk/collections/Cargo.toml index 9403d82..676c3e0 100644 --- a/crates/app/sdk/collections/Cargo.toml +++ b/crates/app/sdk/collections/Cargo.toml @@ -20,3 +20,4 @@ error-decode = ["linkme", "evolve_core/error-decode"] [dev-dependencies] proptest = "1.4" +evolve_testing = { workspace = true, features = ["proptest"] } diff --git a/crates/app/sdk/collections/src/prop_tests.rs b/crates/app/sdk/collections/src/prop_tests.rs index 0d2bc6c..e0defc4 100644 --- a/crates/app/sdk/collections/src/prop_tests.rs +++ b/crates/app/sdk/collections/src/prop_tests.rs @@ -9,36 +9,13 @@ use crate::queue::Queue; use crate::unordered_map::UnorderedMap; use crate::vector::Vector; use crate::ERR_EMPTY; +use crate::ERR_NOT_FOUND; +use evolve_testing::proptest_config::proptest_config; use proptest::prelude::*; -use std::collections::{HashMap, VecDeque}; +use std::collections::{BTreeMap, HashMap, VecDeque}; const MAX_OPS: usize = 32; const MAX_KEYS: usize = 16; -const DEFAULT_CASES: u32 = 128; -const CI_CASES: u32 = 32; - -fn proptest_cases() -> u32 { - if let Ok(value) = std::env::var("EVOLVE_PROPTEST_CASES") { - if let Ok(parsed) = value.parse::() { - if parsed > 0 { - return parsed; - } - } - } - - if std::env::var("EVOLVE_CI").is_ok() || std::env::var("CI").is_ok() { - return CI_CASES; - } - - DEFAULT_CASES -} - -fn proptest_config() -> proptest::test_runner::Config { - proptest::test_runner::Config { - cases: proptest_cases(), - 
..Default::default() - } -} proptest! { #![proptest_config(proptest_config())] @@ -216,3 +193,76 @@ proptest! { prop_assert_eq!(actual_pairs, expected_pairs); } } + +// ============================================================================ +// Map model-based test +// ============================================================================ + +#[derive(Clone, Debug)] +enum MapOp { + Set { key: u64, value: u64 }, + Get { key: u64 }, + Remove { key: u64 }, + Exists { key: u64 }, +} + +fn map_ops_strategy() -> impl Strategy> { + let keys: Vec = (0..MAX_KEYS as u64).collect(); + + let set = (proptest::sample::select(keys.clone()), any::()) + .prop_map(|(key, value)| MapOp::Set { key, value }); + let get = proptest::sample::select(keys.clone()).prop_map(|key| MapOp::Get { key }); + let remove = proptest::sample::select(keys.clone()).prop_map(|key| MapOp::Remove { key }); + let exists = proptest::sample::select(keys).prop_map(|key| MapOp::Exists { key }); + + let op = prop_oneof![4 => set, 2 => get, 2 => remove, 1 => exists]; + proptest::collection::vec(op, 0..=MAX_OPS) +} + +proptest! 
{ + #![proptest_config(proptest_config())] + + #[test] + fn prop_map_matches_model(ops in map_ops_strategy()) { + let map: Map = Map::new(50); + let mut env = MockEnvironment::new(1, 2); + let mut model: BTreeMap = BTreeMap::new(); + + for op in ops { + match op { + MapOp::Set { key, value } => { + map.set(&key, &value, &mut env).unwrap(); + model.insert(key, value); + } + MapOp::Get { key } => { + let actual = map.may_get(&key, &mut env).unwrap(); + let expected = model.get(&key).copied(); + prop_assert_eq!(actual, expected); + + // Also verify get() returns ERR_NOT_FOUND for missing keys + if expected.is_none() { + prop_assert_eq!(map.get(&key, &mut env).unwrap_err(), ERR_NOT_FOUND); + } else { + prop_assert_eq!(map.get(&key, &mut env).unwrap(), expected.unwrap()); + } + } + MapOp::Remove { key } => { + map.remove(&key, &mut env).unwrap(); + model.remove(&key); + } + MapOp::Exists { key } => { + let actual = map.exists(&key, &mut env).unwrap(); + let expected = model.contains_key(&key); + prop_assert_eq!(actual, expected); + } + } + } + + // Final state: verify all keys match the model + for key in 0..MAX_KEYS as u64 { + let expected = model.get(&key).copied(); + let actual = map.may_get(&key, &mut env).unwrap(); + prop_assert_eq!(actual, expected); + } + } +} diff --git a/crates/app/sdk/core/src/runtime_api.rs b/crates/app/sdk/core/src/runtime_api.rs index 3816227..20c753e 100644 --- a/crates/app/sdk/core/src/runtime_api.rs +++ b/crates/app/sdk/core/src/runtime_api.rs @@ -3,6 +3,7 @@ use crate::{AccountId, InvokableMessage, InvokeRequest, InvokeResponse}; use borsh::{BorshDeserialize, BorshSerialize}; pub const ACCOUNT_IDENTIFIER_PREFIX: u8 = 0; pub const ACCOUNT_IDENTIFIER_SINGLETON_PREFIX: u8 = 1; +pub const ACCOUNT_STORAGE_PREFIX: u8 = 2; pub const RUNTIME_ACCOUNT_ID: AccountId = AccountId::from_u64(0); /// Storage key for consensus parameters. 
diff --git a/crates/app/sdk/testing/Cargo.toml b/crates/app/sdk/testing/Cargo.toml index 1505423..45cc222 100644 --- a/crates/app/sdk/testing/Cargo.toml +++ b/crates/app/sdk/testing/Cargo.toml @@ -9,6 +9,11 @@ rust-version.workspace = true [dependencies] evolve_core.workspace = true evolve_stf_traits.workspace = true +proptest = { version = "1.4", optional = true } + +[features] +default = [] +proptest = ["dep:proptest"] [lints] workspace = true diff --git a/crates/app/sdk/testing/src/lib.rs b/crates/app/sdk/testing/src/lib.rs index 3484a4a..3d12191 100644 --- a/crates/app/sdk/testing/src/lib.rs +++ b/crates/app/sdk/testing/src/lib.rs @@ -3,9 +3,13 @@ // Testing code - determinism requirements do not apply. #![allow(clippy::disallowed_types)] +#[cfg(feature = "proptest")] +pub mod proptest_config; + pub mod server_mocks; use evolve_core::encoding::{Decodable, Encodable}; +use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; use evolve_core::storage_api::{ StorageGetRequest, StorageGetResponse, StorageRemoveRequest, StorageRemoveResponse, StorageSetRequest, StorageSetResponse, STORAGE_ACCOUNT_ID, @@ -111,7 +115,8 @@ impl MockEnv { StorageSetRequest::FUNCTION_IDENTIFIER => { let storage_set: StorageSetRequest = request.get()?; - let mut key = self.whoami.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&self.whoami.as_bytes()); key.extend(storage_set.key); self.state.insert(key, storage_set.value.as_vec()?); @@ -120,7 +125,8 @@ impl MockEnv { } StorageRemoveRequest::FUNCTION_IDENTIFIER => { let storage_remove: StorageRemoveRequest = request.get()?; - let mut key = self.whoami.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&self.whoami.as_bytes()); key.extend(storage_remove.key); self.state.remove(&key); Ok(InvokeResponse::new(&StorageRemoveResponse {})?) 
@@ -134,7 +140,8 @@ impl MockEnv { StorageGetRequest::FUNCTION_IDENTIFIER => { let storage_get: StorageGetRequest = request.get()?; - let mut key = storage_get.account_id.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&storage_get.account_id.as_bytes()); key.extend(storage_get.key); let value = self.state.get(&key).cloned(); diff --git a/crates/app/sdk/testing/src/proptest_config.rs b/crates/app/sdk/testing/src/proptest_config.rs new file mode 100644 index 0000000..426d006 --- /dev/null +++ b/crates/app/sdk/testing/src/proptest_config.rs @@ -0,0 +1,38 @@ +//! Shared property-test configuration for the Evolve workspace. +//! +//! Provides a single source of truth for case counts so every crate +//! respects `EVOLVE_PROPTEST_CASES`, CI detection, and a sensible local +//! default without duplicating the logic. + +const DEFAULT_CASES: u32 = 128; +const CI_CASES: u32 = 32; + +/// Return the number of proptest cases to run. +/// +/// Priority: +/// 1. `EVOLVE_PROPTEST_CASES` env var (must parse to a positive `u32`). +/// 2. `CI` or `EVOLVE_CI` env var present → [`CI_CASES`]. +/// 3. Otherwise → [`DEFAULT_CASES`]. +pub fn proptest_cases() -> u32 { + if let Ok(value) = std::env::var("EVOLVE_PROPTEST_CASES") { + if let Ok(parsed) = value.parse::() { + if parsed > 0 { + return parsed; + } + } + } + + if std::env::var("EVOLVE_CI").is_ok() || std::env::var("CI").is_ok() { + return CI_CASES; + } + + DEFAULT_CASES +} + +/// Build a [`proptest::test_runner::Config`] using [`proptest_cases`]. 
+pub fn proptest_config() -> proptest::test_runner::Config { + proptest::test_runner::Config { + cases: proptest_cases(), + ..Default::default() + } +} diff --git a/crates/app/stf/Cargo.toml b/crates/app/stf/Cargo.toml index e30800f..a1d80bb 100644 --- a/crates/app/stf/Cargo.toml +++ b/crates/app/stf/Cargo.toml @@ -17,6 +17,7 @@ linkme = {version = "0.3", default-features = false, optional = true} [dev-dependencies] proptest = "1.4" +evolve_testing = { workspace = true, features = ["proptest"] } [lints] workspace = true diff --git a/crates/app/stf/src/execution_state.rs b/crates/app/stf/src/execution_state.rs index 4b4c824..04114e3 100644 --- a/crates/app/stf/src/execution_state.rs +++ b/crates/app/stf/src/execution_state.rs @@ -558,36 +558,12 @@ mod tests { use super::*; // bring in the Checkpoint and StateChange use evolve_core::{ErrorCode, Message, ReadonlyKV}; + use evolve_testing::proptest_config::proptest_config; use proptest::prelude::*; use std::collections::HashMap; const MAX_OPS: usize = 64; const MAX_KEYS: u8 = 16; - const DEFAULT_CASES: u32 = 128; - const CI_CASES: u32 = 32; - - fn proptest_cases() -> u32 { - if let Ok(value) = std::env::var("EVOLVE_PROPTEST_CASES") { - if let Ok(parsed) = value.parse::() { - if parsed > 0 { - return parsed; - } - } - } - - if std::env::var("EVOLVE_CI").is_ok() || std::env::var("CI").is_ok() { - return CI_CASES; - } - - DEFAULT_CASES - } - - fn proptest_config() -> proptest::test_runner::Config { - proptest::test_runner::Config { - cases: proptest_cases(), - ..Default::default() - } - } #[derive(Clone, Debug)] enum Op { diff --git a/crates/app/stf/src/gas.rs b/crates/app/stf/src/gas.rs index 277a151..e3d2fcf 100644 --- a/crates/app/stf/src/gas.rs +++ b/crates/app/stf/src/gas.rs @@ -279,34 +279,10 @@ impl GasCounter { mod tests { use super::*; use evolve_core::Message; + use evolve_testing::proptest_config::proptest_config; use proptest::prelude::*; const MAX_OPS: usize = 64; - const DEFAULT_CASES: u32 = 128; - const 
CI_CASES: u32 = 32; - - fn proptest_cases() -> u32 { - if let Ok(value) = std::env::var("EVOLVE_PROPTEST_CASES") { - if let Ok(parsed) = value.parse::() { - if parsed > 0 { - return parsed; - } - } - } - - if std::env::var("EVOLVE_CI").is_ok() || std::env::var("CI").is_ok() { - return CI_CASES; - } - - DEFAULT_CASES - } - - fn proptest_config() -> proptest::test_runner::Config { - proptest::test_runner::Config { - cases: proptest_cases(), - ..Default::default() - } - } #[derive(Clone, Debug)] enum Op { diff --git a/crates/app/stf/src/handlers.rs b/crates/app/stf/src/handlers.rs index 7a3a735..fdce0d3 100644 --- a/crates/app/stf/src/handlers.rs +++ b/crates/app/stf/src/handlers.rs @@ -12,7 +12,7 @@ use crate::invoker::Invoker; use crate::runtime_api_impl; use evolve_core::runtime_api::{ CreateAccountRequest, CreateAccountResponse, MigrateRequest, RegisterAccountAtIdRequest, - RegisterAccountAtIdResponse, RUNTIME_ACCOUNT_ID, + RegisterAccountAtIdResponse, ACCOUNT_STORAGE_PREFIX, RUNTIME_ACCOUNT_ID, }; use evolve_core::storage_api::{ StorageGetRequest, StorageGetResponse, StorageRemoveRequest, StorageRemoveResponse, @@ -120,7 +120,8 @@ pub fn handle_storage_exec( StorageSetRequest::FUNCTION_IDENTIFIER => { let storage_set: StorageSetRequest = request.get()?; - let mut key = invoker.whoami.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&invoker.whoami.as_bytes()); key.extend(storage_set.key); // increase gas costs @@ -133,7 +134,8 @@ pub fn handle_storage_exec( } StorageRemoveRequest::FUNCTION_IDENTIFIER => { let storage_remove: StorageRemoveRequest = request.get()?; - let mut key = invoker.whoami.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&invoker.whoami.as_bytes()); key.extend(storage_remove.key); invoker.gas_counter.consume_remove_gas(&key)?; invoker.storage.remove(&key)?; @@ -151,7 +153,8 @@ pub fn handle_storage_query( StorageGetRequest::FUNCTION_IDENTIFIER => { let 
storage_get: StorageGetRequest = request.get()?; - let mut key = storage_get.account_id.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&storage_get.account_id.as_bytes()); key.extend(storage_get.key); let value = invoker.storage.get(&key)?; diff --git a/crates/app/stf/src/lib.rs b/crates/app/stf/src/lib.rs index 70bab39..bfec190 100644 --- a/crates/app/stf/src/lib.rs +++ b/crates/app/stf/src/lib.rs @@ -94,43 +94,21 @@ pub struct Stf { mod model_tests { use super::*; use borsh::{BorshDeserialize, BorshSerialize}; - use evolve_core::runtime_api::ACCOUNT_IDENTIFIER_PREFIX; + use evolve_core::runtime_api::{ACCOUNT_IDENTIFIER_PREFIX, ACCOUNT_STORAGE_PREFIX}; use evolve_core::storage_api::{StorageSetRequest, STORAGE_ACCOUNT_ID}; use evolve_core::{ErrorCode, FungibleAsset, Message}; use evolve_stf_traits::{ AccountsCodeStorage, BeginBlocker as BeginBlockerTrait, Block as BlockTrait, EndBlocker, PostTxExecution, Transaction, TxValidator, WritableKV, }; + use evolve_testing::proptest_config::proptest_config; use hashbrown::HashMap; use proptest::prelude::*; - const DEFAULT_CASES: u32 = 32; - const CI_CASES: u32 = 8; const MAX_TXS: usize = 16; const TEST_ACCOUNT_ID: AccountId = AccountId::from_u64(100); const DEFAULT_SENDER_ID: AccountId = AccountId::from_u64(200); - fn proptest_cases() -> u32 { - if let Ok(value) = std::env::var("EVOLVE_PROPTEST_CASES") { - if let Ok(parsed) = value.parse::() { - if parsed > 0 { - return parsed; - } - } - } - if std::env::var("EVOLVE_CI").is_ok() || std::env::var("CI").is_ok() { - return CI_CASES; - } - DEFAULT_CASES - } - - fn proptest_config() -> proptest::test_runner::Config { - proptest::test_runner::Config { - cases: proptest_cases(), - ..Default::default() - } - } - #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] struct TestMsg { key: Vec, @@ -348,11 +326,21 @@ mod model_tests { } fn account_storage_key(account: AccountId, key: &[u8]) -> Vec { - let mut out = 
account.as_bytes().to_vec(); + let mut out = vec![ACCOUNT_STORAGE_PREFIX]; + out.extend_from_slice(&account.as_bytes()); out.extend_from_slice(key); out } + /// Model the gas a single set(key, value) costs, matching `GasCounter::consume_set_gas`. + fn model_set_gas(config: &StorageGasConfig, storage_key: &[u8], value: &[u8]) -> u64 { + let total_size = (storage_key.len() as u64) + .saturating_add(1) + .saturating_add(value.len() as u64) + .saturating_add(1); + config.storage_set_charge.saturating_mul(total_size) + } + fn account_code_key(account: AccountId) -> Vec { let mut out = vec![ACCOUNT_IDENTIFIER_PREFIX]; out.extend_from_slice(&account.as_bytes()); @@ -495,13 +483,7 @@ mod model_tests { }); let storage_key = account_storage_key(test_account, &key); - // Use storage_set_charge for set operations - let gas_needed = gas_config.storage_set_charge.saturating_mul( - (storage_key.len() as u64) - .saturating_add(1) - .saturating_add(value.len() as u64) - .saturating_add(1), - ); + let gas_needed = model_set_gas(&gas_config, &storage_key, &value); let out_of_gas = gas_needed > gas_limit; if !fail_validate && !fail_after_write && !out_of_gas { @@ -518,13 +500,7 @@ mod model_tests { let tx = &block.txs[idx]; let msg: TestMsg = tx.request.get().unwrap(); let storage_key = account_storage_key(tx.recipient, &msg.key); - // Use storage_set_charge for set operations - let gas_needed = gas_config.storage_set_charge.saturating_mul( - (storage_key.len() as u64) - .saturating_add(1) - .saturating_add(msg.value.len() as u64) - .saturating_add(1), - ); + let gas_needed = model_set_gas(&gas_config, &storage_key, &msg.value); let out_of_gas = gas_needed > tx.gas_limit; if tx.fail_validate { diff --git a/crates/app/tx/eth/Cargo.toml b/crates/app/tx/eth/Cargo.toml index b142890..655ddac 100644 --- a/crates/app/tx/eth/Cargo.toml +++ b/crates/app/tx/eth/Cargo.toml @@ -39,6 +39,8 @@ sha3 = { workspace = true } k256 = { version = "0.13", features = ["ecdsa"], optional = true } 
[dev-dependencies] +# Enable testing feature so integration tests can access sign_hash +evolve_tx_eth = { path = ".", features = ["testing"] } # For testing with real signatures k256 = { version = "0.13", features = ["ecdsa", "arithmetic"] } # For hex test vectors @@ -49,6 +51,8 @@ proptest = "1.5" rand = "0.8" # For TxDecoder trait in tests evolve_stf_traits = { workspace = true } +# Shared test config +evolve_testing = { workspace = true, features = ["proptest"] } [lints] workspace = true diff --git a/crates/app/tx/eth/src/eoa_registry.rs b/crates/app/tx/eth/src/eoa_registry.rs index 27a13f7..0e278d2 100644 --- a/crates/app/tx/eth/src/eoa_registry.rs +++ b/crates/app/tx/eth/src/eoa_registry.rs @@ -2,7 +2,7 @@ use crate::error::ERR_ADDRESS_ACCOUNT_CONFLICT; use crate::traits::{derive_eth_eoa_account_id, derive_runtime_contract_address}; use alloy_primitives::Address; use evolve_core::low_level::{exec_account, query_account, register_account_at_id}; -use evolve_core::runtime_api::RUNTIME_ACCOUNT_ID; +use evolve_core::runtime_api::{ACCOUNT_STORAGE_PREFIX, RUNTIME_ACCOUNT_ID}; use evolve_core::storage_api::{ StorageGetRequest, StorageGetResponse, StorageSetRequest, StorageSetResponse, STORAGE_ACCOUNT_ID, @@ -93,7 +93,8 @@ pub fn lookup_account_id_in_storage( storage: &S, address: Address, ) -> SdkResult> { - let mut full_key = RUNTIME_ACCOUNT_ID.as_bytes().to_vec(); + let mut full_key = vec![ACCOUNT_STORAGE_PREFIX]; + full_key.extend_from_slice(&RUNTIME_ACCOUNT_ID.as_bytes()); full_key.extend_from_slice(&addr_to_id_key(address)); match storage.get(&full_key)? 
{ Some(raw) => Ok(Some(decode_account_id(Message::from_bytes(raw))?)), @@ -123,7 +124,8 @@ pub fn lookup_contract_account_id_in_storage( storage: &S, address: Address, ) -> SdkResult> { - let mut full_key = RUNTIME_ACCOUNT_ID.as_bytes().to_vec(); + let mut full_key = vec![ACCOUNT_STORAGE_PREFIX]; + full_key.extend_from_slice(&RUNTIME_ACCOUNT_ID.as_bytes()); full_key.extend_from_slice(&contract_addr_to_id_key(address)); match storage.get(&full_key)? { Some(raw) => Ok(Some(decode_account_id(Message::from_bytes(raw))?)), @@ -135,7 +137,8 @@ pub fn lookup_address_in_storage( storage: &S, account_id: AccountId, ) -> SdkResult> { - let mut full_key = RUNTIME_ACCOUNT_ID.as_bytes().to_vec(); + let mut full_key = vec![ACCOUNT_STORAGE_PREFIX]; + full_key.extend_from_slice(&RUNTIME_ACCOUNT_ID.as_bytes()); full_key.extend_from_slice(&id_to_addr_key(account_id)); match storage.get(&full_key)? { Some(raw) => { diff --git a/crates/app/tx/eth/src/mempool.rs b/crates/app/tx/eth/src/mempool.rs index b5e16b7..224c1ae 100644 --- a/crates/app/tx/eth/src/mempool.rs +++ b/crates/app/tx/eth/src/mempool.rs @@ -17,7 +17,7 @@ use crate::eoa_registry::{ ensure_eoa_mapping, lookup_account_id_in_env, lookup_contract_account_id_in_env, resolve_or_create_eoa_account, ETH_EOA_CODE_ID, }; -use crate::error::{ERR_RECIPIENT_REQUIRED, ERR_TX_DECODE}; +use crate::error::{ERR_RECIPIENT_REQUIRED, ERR_SENDER_MISMATCH, ERR_TX_DECODE}; use crate::payload::{EthIntentPayload, TxPayload}; use crate::sender_type; use crate::traits::{derive_eth_eoa_account_id, TypedTransaction}; @@ -457,7 +457,19 @@ impl Decodable for TxContext { } let payload = match wire.payload { - TxPayloadWire::Eoa(raw) => TxPayload::Eoa(Box::new(TxEnvelope::decode(&raw)?)), + TxPayloadWire::Eoa(raw) => { + let envelope = TxEnvelope::decode(&raw)?; + // SECURITY: verify the wire-provided sender matches the + // secp256k1-recovered sender from the envelope. 
Without + // this check an attacker can set sender_eth_address to any + // victim address while signing with a different key. + let recovered = envelope.sender(); + let claimed = wire.sender_eth_address.ok_or(ERR_TX_DECODE)?; + if recovered != Address::from(claimed) { + return Err(ERR_SENDER_MISMATCH); + } + TxPayload::Eoa(Box::new(envelope)) + } TxPayloadWire::Custom(raw) => TxPayload::Custom(raw), }; @@ -686,6 +698,145 @@ mod tests { assert_eq!(resolved, contract_id); } + /// Build a raw `ctx1` wire payload whose EOA envelope was signed by + /// `signing_key` but whose `sender_eth_address` field is set to + /// `spoofed_sender`. This simulates the sender-spoofing attack. + fn build_spoofed_eoa_wire( + signing_key: &SigningKey, + to: Address, + spoofed_sender: [u8; 20], + ) -> Vec { + let tx = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 1_000_000_000, + gas_limit: 21_000, + to: TxKind::Call(to), + value: U256::ZERO, + input: Bytes::new(), + }; + let signature = sign_hash(signing_key, tx.signature_hash()); + let signed = tx.into_signed(signature); + let mut rlp = Vec::new(); + signed.rlp_encode(&mut rlp); + + // The real sender recovered from the envelope. + let envelope = TxEnvelope::decode(&rlp).expect("decode signed tx"); + let real_sender = envelope.sender(); + + // Derive a minimal but valid invoke request from the envelope. 
+ let invoke_request = envelope + .to_invoke_requests() + .into_iter() + .next() + .expect("invoke request"); + + let wire = TxContextWireV1 { + sender_type: sender_type::EOA_SECP256K1, + payload: TxPayloadWire::Eoa(rlp), + tx_hash: envelope.tx_hash().0, + gas_limit: envelope.gas_limit(), + nonce: envelope.nonce(), + chain_id: envelope.chain_id(), + effective_gas_price: 1_000_000_000, + invoke_request, + funds: vec![], + sender_account: derive_eth_eoa_account_id(real_sender), + recipient_account: None, + sender_key: real_sender.as_slice().to_vec(), + authentication_payload: Message::new(&real_sender.into_array()).expect("auth payload"), + // Lie: claim the spoofed victim address as sender. + sender_eth_address: Some(spoofed_sender), + recipient_eth_address: Some(to.into()), + }; + + let mut encoded = TX_CONTEXT_WIRE_MAGIC.to_vec(); + encoded.extend(borsh::to_vec(&wire).expect("borsh encode wire")); + encoded + } + + #[test] + fn eoa_wire_spoofed_sender_is_rejected() { + let signing_key = SigningKey::random(&mut OsRng); + let to = Address::repeat_byte(0xBB); + let victim: [u8; 20] = [0xDE; 20]; + + // Recover the real sender so we can assert it differs from the victim. + let real_sender: [u8; 20] = { + let tx = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 1_000_000_000, + gas_limit: 21_000, + to: TxKind::Call(to), + value: U256::ZERO, + input: Bytes::new(), + }; + let sig = sign_hash(&signing_key, tx.signature_hash()); + let signed = tx.into_signed(sig); + let mut rlp = Vec::new(); + signed.rlp_encode(&mut rlp); + TxEnvelope::decode(&rlp).expect("decode").sender().into() + }; + // Guard: the test is only meaningful when the victim differs from real signer. 
+ assert_ne!( + real_sender, victim, + "victim must differ from real signer for this test" + ); + + let wire_bytes = build_spoofed_eoa_wire(&signing_key, to, victim); + let result = TxContext::decode(&wire_bytes); + + assert!( + result.is_err(), + "decode must reject a wire payload where sender_eth_address does not match the recovered secp256k1 sender" + ); + assert_eq!( + result.unwrap_err(), + ERR_SENDER_MISMATCH, + "expected ERR_SENDER_MISMATCH, got a different error" + ); + } + + #[test] + fn eoa_wire_correct_sender_is_accepted() { + // A legitimately-crafted EOA wire payload (sender matches recovered key) + // must still decode successfully after the fix. + let signing_key = SigningKey::random(&mut OsRng); + let to = Address::repeat_byte(0xCC); + + let real_sender: [u8; 20] = { + let tx = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 1_000_000_000, + gas_limit: 21_000, + to: TxKind::Call(to), + value: U256::ZERO, + input: Bytes::new(), + }; + let sig = sign_hash(&signing_key, tx.signature_hash()); + let signed = tx.into_signed(sig); + let mut rlp = Vec::new(); + signed.rlp_encode(&mut rlp); + TxEnvelope::decode(&rlp).expect("decode").sender().into() + }; + + // Build a wire payload with the correct sender. + let wire_bytes = build_spoofed_eoa_wire(&signing_key, to, real_sender); + let result = TxContext::decode(&wire_bytes); + assert!( + result.is_ok(), + "decode must accept a wire payload with a matching sender_eth_address" + ); + let ctx = result.unwrap(); + assert_eq!( + ctx.sender_address().map(<[u8; 20]>::from), + Some(real_sender), + "sender resolution must point to the recovered address" + ); + } + #[test] fn roundtrip_custom_payload_context() { let request = diff --git a/crates/app/tx/eth/tests/proptest_tests.rs b/crates/app/tx/eth/tests/proptest_tests.rs index 21b4712..caa6931 100644 --- a/crates/app/tx/eth/tests/proptest_tests.rs +++ b/crates/app/tx/eth/tests/proptest_tests.rs @@ -1,7 +1,8 @@ //! 
Property-based tests for transaction encoding/decoding roundtrips. use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_primitives::{Address, Bytes, TxKind, U256}; +use evolve_testing::proptest_config::proptest_config; use evolve_tx_eth::{tx_type, TxEnvelope, TypedTransaction}; use k256::ecdsa::{SigningKey, VerifyingKey}; use proptest::prelude::*; @@ -31,7 +32,18 @@ fn arb_bytes(max_len: usize) -> impl Strategy { } fn arb_u256() -> impl Strategy { - prop::array::uniform32(any::()).prop_map(|bytes| U256::from_be_bytes(bytes)) + prop_oneof![ + 3 => prop::array::uniform32(any::()).prop_map(U256::from_be_bytes), + 1 => Just(U256::ZERO), + 1 => Just(U256::MAX), + ] +} + +fn arb_tx_kind() -> impl Strategy { + prop_oneof![ + 4 => arb_address().prop_map(TxKind::Call), + 1 => Just(TxKind::Create), + ] } fn arb_legacy_tx() -> impl Strategy { @@ -39,7 +51,7 @@ fn arb_legacy_tx() -> impl Strategy { any::(), // nonce 1u128..1_000_000_000_000u128, // gas_price (reasonable range) 21000u64..1_000_000u64, // gas_limit - arb_address(), // to + arb_tx_kind(), // to arb_u256(), // value arb_bytes(256), // input (smaller for faster tests) ) @@ -48,7 +60,7 @@ fn arb_legacy_tx() -> impl Strategy { nonce, gas_price, gas_limit, - to: alloy_primitives::TxKind::Call(to), + to, value, input, }) @@ -60,7 +72,7 @@ fn arb_eip1559_tx() -> impl Strategy { 21000u64..1_000_000u64, // gas_limit 1u128..100_000_000_000u128, // max_fee_per_gas 1u128..10_000_000_000u128, // max_priority_fee - arb_address(), // to + arb_tx_kind(), // to arb_u256(), // value arb_bytes(256), // input ) @@ -71,7 +83,7 @@ fn arb_eip1559_tx() -> impl Strategy { gas_limit, max_fee_per_gas: max_fee, max_priority_fee_per_gas: max_priority.min(max_fee), // priority <= max - to: alloy_primitives::TxKind::Call(to), + to, value, input, access_list: Default::default(), @@ -84,7 +96,7 @@ fn arb_eip1559_tx() -> impl Strategy { // 
============================================================================ proptest! { - #![proptest_config(ProptestConfig::with_cases(20))] + #![proptest_config(proptest_config())] /// Property: Encoding then decoding a legacy transaction preserves all fields #[test] @@ -112,8 +124,9 @@ proptest! { prop_assert_eq!(decoded.value(), tx.value); prop_assert_eq!(decoded.input(), tx.input.as_ref()); - if let alloy_primitives::TxKind::Call(to) = tx.to { - prop_assert_eq!(decoded.to(), Some(to)); + match tx.to { + TxKind::Call(to) => prop_assert_eq!(decoded.to(), Some(to)), + TxKind::Create => prop_assert_eq!(decoded.to(), None), } } @@ -143,8 +156,9 @@ proptest! { prop_assert_eq!(decoded.value(), tx.value); prop_assert_eq!(decoded.input(), tx.input.as_ref()); - if let alloy_primitives::TxKind::Call(to) = tx.to { - prop_assert_eq!(decoded.to(), Some(to)); + match tx.to { + TxKind::Call(to) => prop_assert_eq!(decoded.to(), Some(to)), + TxKind::Create => prop_assert_eq!(decoded.to(), None), } } @@ -242,7 +256,7 @@ impl TxModel { } proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(15))] + #![proptest_config(proptest_config())] /// Model test: TxEnvelope preserves the model #[test] diff --git a/crates/rpc/chain-index/Cargo.toml b/crates/rpc/chain-index/Cargo.toml index 0ca4f60..609697c 100644 --- a/crates/rpc/chain-index/Cargo.toml +++ b/crates/rpc/chain-index/Cargo.toml @@ -40,6 +40,7 @@ r2d2_sqlite = "0.32" [dev-dependencies] proptest = "1.4" tempfile = "3" +evolve_testing = { workspace = true, features = ["proptest"] } [lints] workspace = true diff --git a/crates/rpc/chain-index/src/index.rs b/crates/rpc/chain-index/src/index.rs index 43b57ee..5b635c2 100644 --- a/crates/rpc/chain-index/src/index.rs +++ b/crates/rpc/chain-index/src/index.rs @@ -172,12 +172,10 @@ impl PersistentChainIndex { miner BLOB NOT NULL, extra_data BLOB NOT NULL ); - CREATE INDEX IF NOT EXISTS idx_blocks_hash ON blocks(hash); CREATE TABLE IF NOT EXISTS transactions ( hash BLOB PRIMARY KEY, block_number INTEGER NOT NULL, - block_hash BLOB NOT NULL, transaction_index INTEGER NOT NULL, from_addr BLOB NOT NULL, to_addr BLOB, @@ -200,7 +198,6 @@ impl PersistentChainIndex { CREATE TABLE IF NOT EXISTS receipts ( transaction_hash BLOB PRIMARY KEY, transaction_index INTEGER NOT NULL, - block_hash BLOB NOT NULL, block_number INTEGER NOT NULL, from_addr BLOB NOT NULL, to_addr BLOB, @@ -209,6 +206,7 @@ impl PersistentChainIndex { contract_address BLOB, status INTEGER NOT NULL, tx_type INTEGER NOT NULL, + revert_reason TEXT, FOREIGN KEY (block_number) REFERENCES blocks(number) ); CREATE INDEX IF NOT EXISTS idx_receipts_block ON receipts(block_number); @@ -231,6 +229,21 @@ impl PersistentChainIndex { value BLOB NOT NULL );", )?; + + // Migration: add revert_reason column to existing receipts tables that + // were created before this column existed. We only ignore the + // "duplicate column" error; other failures (I/O, corruption) are + // propagated so startup fails visibly. 
+ match conn.execute_batch("ALTER TABLE receipts ADD COLUMN revert_reason TEXT;") { + Ok(()) => {} + Err(rusqlite::Error::SqliteFailure(_, Some(ref msg))) + if msg.contains("duplicate column") => + { + // Column already exists — nothing to do. + } + Err(e) => return Err(e.into()), + } + Ok(()) } @@ -280,6 +293,10 @@ impl PersistentChainIndex { }) } + /// Parse a transaction row. Expected column order: + /// t.hash, t.block_number, b.hash (block_hash from JOIN), t.transaction_index, + /// t.from_addr, t.to_addr, t.value, t.gas, t.gas_price, t.input, t.nonce, + /// t.v, t.r, t.s, t.tx_type, t.chain_id, t.max_fee_per_gas, t.max_priority_fee_per_gas fn row_to_stored_transaction(row: &rusqlite::Row<'_>) -> rusqlite::Result { use alloy_primitives::{Bytes, U256}; @@ -329,6 +346,10 @@ impl PersistentChainIndex { }) } + /// Parse a receipt row. Expected column order: + /// r.transaction_hash, r.transaction_index, b.hash (block_hash from JOIN), + /// r.block_number, r.from_addr, r.to_addr, r.cumulative_gas_used, r.gas_used, + /// r.contract_address, r.status, r.tx_type, t.gas_price, r.revert_reason fn row_to_stored_receipt(row: &rusqlite::Row<'_>) -> rusqlite::Result { let transaction_hash_bytes: Vec = row.get(0)?; let transaction_index: i64 = row.get(1)?; @@ -342,6 +363,7 @@ impl PersistentChainIndex { let status: i64 = row.get(9)?; let tx_type: i64 = row.get(10)?; let effective_gas_price_bytes: Vec = row.get(11)?; + let revert_reason: Option = row.get(12)?; let to = to_bytes .as_deref() @@ -366,6 +388,7 @@ impl PersistentChainIndex { logs: vec![], // logs are stored separately status: status as u8, tx_type: tx_type as u8, + revert_reason, }) } } @@ -457,10 +480,12 @@ impl ChainIndex for PersistentChainIndex { let conn = self.read_conn()?; let result = conn.query_row( - "SELECT hash, block_number, block_hash, transaction_index, from_addr, to_addr, - value, gas, gas_price, input, nonce, v, r, s, tx_type, chain_id, - max_fee_per_gas, max_priority_fee_per_gas - FROM 
transactions WHERE hash = ?", + "SELECT t.hash, t.block_number, b.hash, t.transaction_index, t.from_addr, t.to_addr, + t.value, t.gas, t.gas_price, t.input, t.nonce, t.v, t.r, t.s, t.tx_type, + t.chain_id, t.max_fee_per_gas, t.max_priority_fee_per_gas + FROM transactions t + INNER JOIN blocks b ON b.number = t.block_number + WHERE t.hash = ?", params![hash.as_slice()], Self::row_to_stored_transaction, ); @@ -500,14 +525,15 @@ impl ChainIndex for PersistentChainIndex { let conn = self.read_conn()?; let result = conn.query_row( - "SELECT receipts.transaction_hash, receipts.transaction_index, receipts.block_hash, - receipts.block_number, receipts.from_addr, receipts.to_addr, - receipts.cumulative_gas_used, receipts.gas_used, - receipts.contract_address, receipts.status, receipts.tx_type, - transactions.gas_price - FROM receipts - INNER JOIN transactions ON transactions.hash = receipts.transaction_hash - WHERE receipts.transaction_hash = ?", + "SELECT r.transaction_hash, r.transaction_index, b.hash, + r.block_number, r.from_addr, r.to_addr, + r.cumulative_gas_used, r.gas_used, + r.contract_address, r.status, r.tx_type, + t.gas_price, r.revert_reason + FROM receipts r + INNER JOIN transactions t ON t.hash = r.transaction_hash + INNER JOIN blocks b ON b.number = r.block_number + WHERE r.transaction_hash = ?", params![hash.as_slice()], Self::row_to_stored_receipt, ); @@ -632,14 +658,13 @@ fn insert_transaction( ) -> ChainIndexResult<()> { tx.execute( "INSERT OR REPLACE INTO transactions - (hash, block_number, block_hash, transaction_index, from_addr, to_addr, + (hash, block_number, transaction_index, from_addr, to_addr, value, gas, gas_price, input, nonce, v, r, s, tx_type, chain_id, max_fee_per_gas, max_priority_fee_per_gas) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", params![ transaction.hash.as_slice(), transaction.block_number as i64, - transaction.block_hash.as_slice(), array_index, 
transaction.from.as_slice(), transaction.to.as_ref().map(|a| a.as_slice()), @@ -671,13 +696,12 @@ fn insert_receipt( ) -> ChainIndexResult<()> { tx.execute( "INSERT OR REPLACE INTO receipts - (transaction_hash, transaction_index, block_hash, block_number, from_addr, to_addr, - cumulative_gas_used, gas_used, contract_address, status, tx_type) + (transaction_hash, transaction_index, block_number, from_addr, to_addr, + cumulative_gas_used, gas_used, contract_address, status, tx_type, revert_reason) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", params![ receipt.transaction_hash.as_slice(), array_index, - receipt.block_hash.as_slice(), receipt.block_number as i64, receipt.from.as_slice(), receipt.to.as_ref().map(|a| a.as_slice()), @@ -686,6 +710,7 @@ fn insert_receipt( receipt.contract_address.as_ref().map(|a| a.as_slice()), receipt.status as i64, receipt.tx_type as i64, + receipt.revert_reason.as_deref(), ], )?; Ok(()) @@ -864,6 +889,7 @@ mod tests { logs: vec![], status: if success { 1 } else { 0 }, tx_type: 0, + revert_reason: None, } } @@ -1009,6 +1035,7 @@ mod model_tests { }; use super::*; use alloy_primitives::Address; + use evolve_testing::proptest_config::proptest_config; use proptest::prelude::*; use std::collections::HashMap; @@ -1183,6 +1210,8 @@ mod model_tests { } proptest! { + #![proptest_config(proptest_config())] + /// Model-based test: verify that PersistentChainIndex behaves identically to the reference model. 
#[test] fn prop_chain_index_matches_model(operations in arb_operations(30)) { diff --git a/crates/rpc/chain-index/src/integration.rs b/crates/rpc/chain-index/src/integration.rs index 31515fc..fe6bd96 100644 --- a/crates/rpc/chain-index/src/integration.rs +++ b/crates/rpc/chain-index/src/integration.rs @@ -329,6 +329,10 @@ fn build_stored_receipt( let to = resolve_recipient_address(tx); let logs: Vec = tx_result.events.iter().map(event_to_stored_log).collect(); let status = if tx_result.response.is_ok() { 1 } else { 0 }; + let revert_reason = match &tx_result.response { + Err(err) => Some(format!("ErrorCode(id=0x{:04x}, arg={})", err.id, err.arg)), + Ok(_) => None, + }; StoredReceipt { transaction_hash: tx_hash, @@ -347,6 +351,7 @@ fn build_stored_receipt( logs, status, tx_type: eth_fields.as_ref().map(|f| f.tx_type).unwrap_or(0), + revert_reason, } } diff --git a/crates/rpc/chain-index/src/provider.rs b/crates/rpc/chain-index/src/provider.rs index 9ecb5cf..a2c5910 100644 --- a/crates/rpc/chain-index/src/provider.rs +++ b/crates/rpc/chain-index/src/provider.rs @@ -602,13 +602,28 @@ impl ChainStateProvider Result, RpcError> { + // Reject inverted ranges immediately (matches go-ethereum behavior). + if from_block > to_block { + return Err(RpcError::InvalidParams(format!( + "fromBlock ({}) is greater than toBlock ({})", + from_block, to_block + ))); + } + let mut result = Vec::new(); - // Limit range to prevent DoS - let max_blocks = 1000u64; - let actual_to = to_block.min(from_block.saturating_add(max_blocks)); + // Reject oversized ranges rather than silently truncating (matches Geth behavior). + const MAX_BLOCK_RANGE: u64 = 1000; + // Maximum log entries returned in a single getLogs call. 
+ const MAX_LOGS: usize = 10_000; + let range = to_block.saturating_sub(from_block).saturating_add(1); + if range > MAX_BLOCK_RANGE { + return Err(RpcError::InvalidParams(format!( + "block range ({range}) exceeds maximum ({MAX_BLOCK_RANGE}); narrow the filter range" + ))); + } - for block_num in from_block..=actual_to { + for block_num in from_block..=to_block { // Get block for hash let block = match self.index.get_block(block_num)? { Some(b) => b, @@ -667,6 +682,12 @@ impl ChainStateProvider= MAX_LOGS { + return Err(RpcError::InvalidParams(format!( + "query returned more than {} results; narrow the filter range", + MAX_LOGS + ))); + } result.push(stored_log.to_rpc_log( block_num, block.hash, @@ -878,6 +899,7 @@ mod tests { logs: vec![], status: 1, tx_type: 0, + revert_reason: None, } } diff --git a/crates/rpc/chain-index/src/querier.rs b/crates/rpc/chain-index/src/querier.rs index 06afcdb..cec1aa8 100644 --- a/crates/rpc/chain-index/src/querier.rs +++ b/crates/rpc/chain-index/src/querier.rs @@ -8,6 +8,7 @@ use alloy_primitives::{Address, Bytes, U256}; use async_trait::async_trait; use evolve_core::encoding::Encodable; +use evolve_core::runtime_api::ACCOUNT_STORAGE_PREFIX; use evolve_core::{AccountId, Message, ReadonlyKV}; use evolve_eth_jsonrpc::error::RpcError; use evolve_rpc_types::CallRequest; @@ -53,7 +54,8 @@ impl StorageStateQuerier { } fn read_nonce(&self, account_id: AccountId) -> Result { - let mut key = account_id.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&account_id.as_bytes()); key.push(0u8); // EthEoaAccount::nonce storage prefix match self .storage @@ -68,7 +70,8 @@ impl StorageStateQuerier { } fn read_balance(&self, account_id: AccountId) -> Result { - let mut key = self.token_account_id.as_bytes().to_vec(); + let mut key = vec![ACCOUNT_STORAGE_PREFIX]; + key.extend_from_slice(&self.token_account_id.as_bytes()); key.push(1u8); // Token::balances storage prefix key.extend( account_id diff --git 
a/crates/rpc/chain-index/src/types.rs b/crates/rpc/chain-index/src/types.rs index be03da6..f418bfd 100644 --- a/crates/rpc/chain-index/src/types.rs +++ b/crates/rpc/chain-index/src/types.rs @@ -169,6 +169,9 @@ pub struct StoredReceipt { pub status: u8, /// Transaction type. pub tx_type: u8, + /// Revert reason for failed transactions. + #[serde(skip_serializing_if = "Option::is_none")] + pub revert_reason: Option, } impl StoredReceipt { @@ -204,6 +207,7 @@ impl StoredReceipt { logs_bloom: Bytes::new(), tx_type: U64::from(self.tx_type as u64), status: U64::from(self.status as u64), + revert_reason: self.revert_reason.clone(), } } } diff --git a/crates/rpc/eth-jsonrpc/src/lib.rs b/crates/rpc/eth-jsonrpc/src/lib.rs index 0421083..72e1fe5 100644 --- a/crates/rpc/eth-jsonrpc/src/lib.rs +++ b/crates/rpc/eth-jsonrpc/src/lib.rs @@ -11,7 +11,7 @@ //! #[tokio::main] //! async fn main() { //! let config = RpcServerConfig { -//! chain_id: 1, +//! chain_id: 900_901, //! ..Default::default() //! }; //! let handle = start_server(config, NoopStateProvider).await.unwrap(); diff --git a/crates/rpc/eth-jsonrpc/src/server.rs b/crates/rpc/eth-jsonrpc/src/server.rs index 64bc909..a29c1e1 100644 --- a/crates/rpc/eth-jsonrpc/src/server.rs +++ b/crates/rpc/eth-jsonrpc/src/server.rs @@ -35,11 +35,19 @@ pub struct RpcServerConfig { pub chain_id: u64, } +/// Default chain ID for the Evolve network. +/// +/// Deliberately chosen to be distinct from any live EVM network to prevent +/// cross-chain replay of transactions signed against a default configuration. +/// Override via `RpcServerConfig { chain_id: , .. }` or the +/// `EVOLVE_CHAIN__CHAIN_ID` environment variable when running a production node. 
+pub const DEFAULT_CHAIN_ID: u64 = 900_901; + impl Default for RpcServerConfig { fn default() -> Self { Self { http_addr: "127.0.0.1:8545".parse().unwrap(), - chain_id: 1, + chain_id: DEFAULT_CHAIN_ID, } } } @@ -434,6 +442,21 @@ impl EthApiServer for EthRpcServer { } async fn send_raw_transaction(&self, data: Bytes) -> Result { + // Reject empty or oversized payloads before passing to the verifier. + const MAX_RAW_TX_SIZE: usize = 128 * 1024; // 128 KiB + if data.is_empty() { + return Err(ErrorObjectOwned::from(RpcError::InvalidTransaction( + "empty transaction".to_string(), + ))); + } + if data.len() > MAX_RAW_TX_SIZE { + return Err(ErrorObjectOwned::from(RpcError::InvalidTransaction( + format!( + "transaction exceeds maximum size of {} bytes", + MAX_RAW_TX_SIZE + ), + ))); + } self.state .send_raw_transaction(data.as_ref()) .await @@ -492,8 +515,16 @@ impl EthApiServer for EthRpcServer { _newest_block: BlockNumberOrTag, _reward_percentiles: Option>, ) -> Result { - // Return zero fees for the requested block count - let count = block_count.to::().min(1024); + // Per EIP-1559 and go-ethereum: block_count must be in [1, 1024]. + const MAX_FEE_HISTORY_BLOCKS: u64 = 1024; + let count_raw = block_count.to::(); + if count_raw == 0 || count_raw > MAX_FEE_HISTORY_BLOCKS { + return Err(ErrorObjectOwned::from(RpcError::InvalidParams(format!( + "block count must be between 1 and {}", + MAX_FEE_HISTORY_BLOCKS + )))); + } + let count = count_raw as usize; Ok(FeeHistory { oldest_block: U64::ZERO, base_fee_per_gas: vec![U256::ZERO; count + 1], @@ -538,6 +569,14 @@ impl Web3ApiServer for EthRpcServer { } async fn sha3(&self, data: Bytes) -> Result { + // Reject absurdly large inputs to prevent CPU/allocation DoS. 
+ const MAX_SHA3_INPUT: usize = 128 * 1024; // 128 KiB + if data.len() > MAX_SHA3_INPUT { + return Err(ErrorObjectOwned::from(RpcError::InvalidParams(format!( + "input exceeds maximum allowed size of {} bytes", + MAX_SHA3_INPUT + )))); + } use sha3::{Digest, Keccak256}; let mut hasher = Keccak256::new(); hasher.update(data.as_ref()); @@ -599,11 +638,13 @@ impl EthPubSubApiServer for EthRpcServer { "newPendingTransactions" => SubscriptionKind::NewPendingTransactions, "syncing" => SubscriptionKind::Syncing, _ => { - // Reject unknown subscription types + // Reject unknown subscription types. + // Truncate the reflected kind string to avoid echoing arbitrary user input. + let safe_kind: String = kind.chars().take(64).collect(); pending .reject(jsonrpsee::types::ErrorObject::owned( -32602, - format!("Unknown subscription type: {}", kind), + format!("Unknown subscription type: {}", safe_kind), None::<()>, )) .await; @@ -1177,26 +1218,60 @@ mod tests { // Tests boundary condition (capping at 1024) #[tokio::test] - async fn test_fee_history_capped_at_1024() { + async fn test_fee_history_rejects_over_1024() { let provider = MockStateProvider::new().with_block_number(100); let server = EthRpcServer::new(RpcServerConfig::default(), provider); - // Request more than 1024 blocks - should be capped - let result = EthApiServer::fee_history( + // Request more than 1024 blocks - should be rejected + let err = EthApiServer::fee_history( &server, U64::from(2000), BlockNumberOrTag::Tag(BlockTag::Latest), None, ) .await + .unwrap_err(); + + assert_eq!(err.code(), jsonrpsee::types::error::INVALID_PARAMS_CODE); + assert!(err + .message() + .contains("block count must be between 1 and 1024")); + } + + #[tokio::test] + async fn test_fee_history_rejects_zero() { + let provider = MockStateProvider::new().with_block_number(100); + let server = EthRpcServer::new(RpcServerConfig::default(), provider); + + let err = EthApiServer::fee_history( + &server, + U64::ZERO, + 
BlockNumberOrTag::Tag(BlockTag::Latest), + None, + ) + .await + .unwrap_err(); + + assert_eq!(err.code(), jsonrpsee::types::error::INVALID_PARAMS_CODE); + } + + #[tokio::test] + async fn test_fee_history_at_max() { + let provider = MockStateProvider::new().with_block_number(2000); + let server = EthRpcServer::new(RpcServerConfig::default(), provider); + + // Exactly 1024 should succeed + let result = EthApiServer::fee_history( + &server, + U64::from(1024), + BlockNumberOrTag::Tag(BlockTag::Latest), + None, + ) + .await .unwrap(); - assert_eq!(result.base_fee_per_gas.len(), 1025); // capped at 1024 + 1 + assert_eq!(result.base_fee_per_gas.len(), 1025); assert_eq!(result.gas_used_ratio.len(), 1024); - assert_eq!(result.oldest_block, U64::ZERO); - assert!(result.reward.is_none()); - assert!(result.base_fee_per_gas.iter().all(|fee| *fee == U256::ZERO)); - assert!(result.gas_used_ratio.iter().all(|ratio| *ratio == 0.0)); } // ==================== Transaction count extraction ==================== diff --git a/crates/rpc/evnode/examples/run_server.rs b/crates/rpc/evnode/examples/run_server.rs index f603fb6..4bb1421 100644 --- a/crates/rpc/evnode/examples/run_server.rs +++ b/crates/rpc/evnode/examples/run_server.rs @@ -141,6 +141,10 @@ async fn main() -> Result<(), Box> { max_gas: 30_000_000, max_bytes: 128 * 1024, }, + // No auth token for this local example. In production, set + // EVOLVE_EVNODE_AUTH_TOKEN and keep require_auth = true. 
+ auth_token: None, + require_auth: false, }; tracing::info!("Starting EVNode gRPC server on {}", addr); diff --git a/crates/rpc/evnode/src/lib.rs b/crates/rpc/evnode/src/lib.rs index d03818f..092286a 100644 --- a/crates/rpc/evnode/src/lib.rs +++ b/crates/rpc/evnode/src/lib.rs @@ -53,7 +53,8 @@ pub mod proto { pub use error::EvnodeError; pub use runner::run_external_consensus_node_eth; pub use server::{ - start_evnode_server, start_evnode_server_with_mempool, EvnodeServer, EvnodeServerConfig, + make_auth_interceptor, start_evnode_server, start_evnode_server_with_mempool, EvnodeServer, + EvnodeServerConfig, AUTH_TOKEN_METADATA_KEY, }; pub use service::{ compute_state_root, BlockExecutedInfo, EvnodeStfExecutor, ExecutorServiceConfig, diff --git a/crates/rpc/evnode/src/runner.rs b/crates/rpc/evnode/src/runner.rs index 370c6ca..4c95292 100644 --- a/crates/rpc/evnode/src/runner.rs +++ b/crates/rpc/evnode/src/runner.rs @@ -537,6 +537,8 @@ pub fn run_external_consensus_node_eth< enable_gzip: config.grpc.enable_gzip, max_message_size: config.grpc_max_message_size_usize(), executor_config, + auth_token: std::env::var("EVOLVE_EVNODE_AUTH_TOKEN").ok(), + require_auth: true, }; let server = EvnodeServer::with_mempool(grpc_config, stf, storage.clone(), codes, mempool) diff --git a/crates/rpc/evnode/src/server.rs b/crates/rpc/evnode/src/server.rs index 0dc6cdb..7133ec9 100644 --- a/crates/rpc/evnode/src/server.rs +++ b/crates/rpc/evnode/src/server.rs @@ -6,7 +6,9 @@ use evolve_core::ReadonlyKV; use evolve_mempool::{Mempool, SharedMempool}; use evolve_stf_traits::AccountsCodeStorage; use evolve_tx_eth::TxContext; +use tonic::service::interceptor::InterceptedService; use tonic::transport::Server; +use tonic::{Request, Status}; use crate::proto::evnode::v1::executor_service_server::ExecutorServiceServer; use crate::service::{ @@ -14,8 +16,43 @@ use crate::service::{ StateChangeCallback, }; +/// Metadata key used to carry the shared-secret auth token. 
+pub const AUTH_TOKEN_METADATA_KEY: &str = "x-evnode-token"; + +/// Build a tonic interceptor that enforces a shared-secret bearer token. +/// +/// The interceptor reads the `x-evnode-token` metadata key from every +/// incoming request and rejects the call with `UNAUTHENTICATED` if the +/// value is absent or does not match `expected_token`. +/// +/// # Security note +/// +/// This provides defense-in-depth for the privileged evnode execution +/// interface. The server should **also** be bound to a loopback or VPN +/// interface — token auth is not a substitute for network-level isolation. +// `tonic::Status` is large by design; the Interceptor trait requires +// `Result, Status>` so we cannot box the error. +#[allow(clippy::result_large_err)] +pub fn make_auth_interceptor( + expected_token: String, +) -> impl Fn(Request<()>) -> Result, Status> + Clone { + move |req: Request<()>| { + let provided = req + .metadata() + .get(AUTH_TOKEN_METADATA_KEY) + .and_then(|v| v.to_str().ok()); + + match provided { + Some(token) if token == expected_token => Ok(req), + _ => Err(Status::unauthenticated( + "missing or invalid evnode auth token", + )), + } + } +} + /// Configuration for the EVNode gRPC server. -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct EvnodeServerConfig { /// Address to bind the gRPC server to. pub addr: SocketAddr, @@ -25,6 +62,40 @@ pub struct EvnodeServerConfig { pub max_message_size: usize, /// Executor service configuration. pub executor_config: ExecutorServiceConfig, + /// Optional shared-secret auth token. + /// + /// When set, every inbound gRPC call must carry this value in the + /// `x-evnode-token` metadata header. Calls without a valid token + /// are rejected with `UNAUTHENTICATED`. + /// + /// The `EVOLVE_EVNODE_AUTH_TOKEN` environment variable is a convenient + /// source for this value. When `require_auth` is `true` (the default), + /// the server refuses to start if no token is provided. 
+ pub auth_token: Option, + /// Require a non-empty `auth_token` before the server will start. + /// + /// Defaults to `true`. When `true` and `auth_token` is `None` the + /// `serve` / `serve_with_shutdown` methods return an error immediately + /// rather than starting an unauthenticated server. Set to `false` only + /// for local development or test environments where network-level + /// isolation makes token auth unnecessary. + pub require_auth: bool, +} + +impl std::fmt::Debug for EvnodeServerConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EvnodeServerConfig") + .field("addr", &self.addr) + .field("enable_gzip", &self.enable_gzip) + .field("max_message_size", &self.max_message_size) + .field("executor_config", &self.executor_config) + .field( + "auth_token", + &self.auth_token.as_ref().map(|_| ""), + ) + .field("require_auth", &self.require_auth) + .finish() + } } impl Default for EvnodeServerConfig { @@ -34,6 +105,10 @@ impl Default for EvnodeServerConfig { enable_gzip: true, max_message_size: 4 * 1024 * 1024, // 4MB executor_config: ExecutorServiceConfig::default(), + auth_token: std::env::var("EVOLVE_EVNODE_AUTH_TOKEN") + .ok() + .filter(|t| !t.trim().is_empty()), + require_auth: true, } } } @@ -110,25 +185,22 @@ where self } - /// Start the gRPC server. - /// - /// This method runs the server until it is shut down. - pub async fn serve(self) -> Result<(), Box> { - let addr = self.config.addr; - + /// Build the [`ExecutorServiceImpl`] from the current server state, consuming `self`. 
+ fn build_service(self) -> (EvnodeServerConfig, ExecutorServiceImpl) { + let config = self.config; let mut service = match self.mempool { Some(mempool) => ExecutorServiceImpl::with_mempool( self.stf, self.storage, self.codes, - self.config.executor_config, + config.executor_config.clone(), mempool, ), None => ExecutorServiceImpl::new( self.stf, self.storage, self.codes, - self.config.executor_config, + config.executor_config.clone(), ), }; @@ -140,19 +212,61 @@ where service = service.with_on_block_executed(callback); } - let mut server = ExecutorServiceServer::new(service) - .max_decoding_message_size(self.config.max_message_size) - .max_encoding_message_size(self.config.max_message_size); + (config, service) + } - if self.config.enable_gzip { + /// Apply message size and compression settings to a raw service, returning + /// a fully-configured [`ExecutorServiceServer`]. + fn apply_settings( + service: ExecutorServiceImpl, + max_message_size: usize, + enable_gzip: bool, + ) -> ExecutorServiceServer> { + let mut server = ExecutorServiceServer::new(service) + .max_decoding_message_size(max_message_size) + .max_encoding_message_size(max_message_size); + if enable_gzip { server = server .accept_compressed(tonic::codec::CompressionEncoding::Gzip) .send_compressed(tonic::codec::CompressionEncoding::Gzip); } + server + } + + /// Start the gRPC server. + /// + /// Returns an error if `require_auth` is `true` and no `auth_token` is + /// configured. This is a fail-fast guard against accidentally exposing the + /// privileged execution interface without authentication. + pub async fn serve(self) -> Result<(), Box> { + let (config, service) = self.build_service(); + + if config.require_auth && config.auth_token.is_none() { + return Err( + "EVNode server refuses to start: require_auth is true but no auth_token is set. \ + Provide a token via EVOLVE_EVNODE_AUTH_TOKEN or EvnodeServerConfig::auth_token, \ + or set require_auth = false for dev/test environments." 
+ .into(), + ); + } + let addr = config.addr; tracing::info!("Starting EVNode gRPC server on {}", addr); + let server = Self::apply_settings(service, config.max_message_size, config.enable_gzip); - Server::builder().add_service(server).serve(addr).await?; + if let Some(token) = config.auth_token { + tracing::info!( + "EVNode auth token is configured; unauthenticated calls will be rejected" + ); + let svc = InterceptedService::new(server, make_auth_interceptor(token)); + Server::builder().add_service(svc).serve(addr).await?; + } else { + tracing::warn!( + "EVNode server starting WITHOUT auth token — \ + set EVOLVE_EVNODE_AUTH_TOKEN or config.auth_token for production use" + ); + Server::builder().add_service(server).serve(addr).await?; + } Ok(()) } @@ -160,6 +274,9 @@ where /// Start the gRPC server with graceful shutdown. /// /// The server will shut down when the provided signal is received. + /// + /// Returns an error if `require_auth` is `true` and no `auth_token` is + /// configured. See [`serve`](Self::serve) for details. 
pub async fn serve_with_shutdown( self, signal: F, @@ -167,48 +284,40 @@ where where F: std::future::Future + Send, { - let addr = self.config.addr; + let (config, service) = self.build_service(); - let mut service = match self.mempool { - Some(mempool) => ExecutorServiceImpl::with_mempool( - self.stf, - self.storage, - self.codes, - self.config.executor_config, - mempool, - ), - None => ExecutorServiceImpl::new( - self.stf, - self.storage, - self.codes, - self.config.executor_config, - ), - }; - - if let Some(callback) = self.on_state_change { - service = service.with_state_change_callback(callback); - } - - if let Some(callback) = self.on_block_executed { - service = service.with_on_block_executed(callback); - } - - let mut server = ExecutorServiceServer::new(service) - .max_decoding_message_size(self.config.max_message_size) - .max_encoding_message_size(self.config.max_message_size); - - if self.config.enable_gzip { - server = server - .accept_compressed(tonic::codec::CompressionEncoding::Gzip) - .send_compressed(tonic::codec::CompressionEncoding::Gzip); + if config.require_auth && config.auth_token.is_none() { + return Err( + "EVNode server refuses to start: require_auth is true but no auth_token is set. \ + Provide a token via EVOLVE_EVNODE_AUTH_TOKEN or EvnodeServerConfig::auth_token, \ + or set require_auth = false for dev/test environments." 
+ .into(), + ); } + let addr = config.addr; tracing::info!("Starting EVNode gRPC server on {}", addr); + let server = Self::apply_settings(service, config.max_message_size, config.enable_gzip); - Server::builder() - .add_service(server) - .serve_with_shutdown(addr, signal) - .await?; + if let Some(token) = config.auth_token { + tracing::info!( + "EVNode auth token is configured; unauthenticated calls will be rejected" + ); + let svc = InterceptedService::new(server, make_auth_interceptor(token)); + Server::builder() + .add_service(svc) + .serve_with_shutdown(addr, signal) + .await?; + } else { + tracing::warn!( + "EVNode server starting WITHOUT auth token — \ + set EVOLVE_EVNODE_AUTH_TOKEN or config.auth_token for production use" + ); + Server::builder() + .add_service(server) + .serve_with_shutdown(addr, signal) + .await?; + } Ok(()) } diff --git a/crates/rpc/evnode/src/service.rs b/crates/rpc/evnode/src/service.rs index 337881d..25b0aa4 100644 --- a/crates/rpc/evnode/src/service.rs +++ b/crates/rpc/evnode/src/service.rs @@ -630,6 +630,19 @@ where let mut cumulative_bytes: u64 = 0; let mut cumulative_gas: u64 = 0; + // Use request limits if non-zero, otherwise fall back to configured defaults. + // Callers must not be able to disable limits by sending zero. 
+ let effective_max_bytes = if req.max_bytes > 0 { + req.max_bytes + } else { + self.config.max_bytes + }; + let effective_max_gas = if req.max_gas > 0 { + req.max_gas + } else { + self.config.max_gas + }; + for raw_tx in &req.txs { let tx_bytes = raw_tx.len() as u64; @@ -646,10 +659,9 @@ where let tx_gas = Self::estimate_tx_gas(&tx); // Check size limit - let would_exceed_bytes = - req.max_bytes > 0 && cumulative_bytes + tx_bytes > req.max_bytes; + let would_exceed_bytes = cumulative_bytes + tx_bytes > effective_max_bytes; // Check gas limit - let would_exceed_gas = req.max_gas > 0 && cumulative_gas + tx_gas > req.max_gas; + let would_exceed_gas = cumulative_gas + tx_gas > effective_max_gas; if would_exceed_bytes || would_exceed_gas { // Transaction is valid but doesn't fit - postpone it diff --git a/crates/rpc/grpc/src/conversion.rs b/crates/rpc/grpc/src/conversion.rs index a560d5d..2a15cb0 100644 --- a/crates/rpc/grpc/src/conversion.rs +++ b/crates/rpc/grpc/src/conversion.rs @@ -317,14 +317,32 @@ pub fn proto_to_log_filter(filter: &proto::LogFilter) -> RpcLogFilter { // ============================================================================ /// Convert proto CallRequest to RpcCallRequest. -pub fn proto_to_call_request(req: &proto::CallRequest) -> RpcCallRequest { - RpcCallRequest { - from: req.from.as_ref().and_then(proto_to_address), - to: req - .to - .as_ref() - .and_then(proto_to_address) - .unwrap_or(Address::ZERO), +/// +/// Returns `None` if address fields are present but have invalid byte lengths. +/// A missing `to` field is treated as contract creation and mapped to +/// `Address::ZERO`; callers must reject `None` rather than silently routing +/// malformed requests. +pub fn proto_to_call_request(req: &proto::CallRequest) -> Option { + let to = req.to.as_ref().and_then(proto_to_address).or_else(|| { + // A missing `to` field is only valid for contract-creation calls. 
+ // We represent that as Address::ZERO here and let the STF decide, + // but only when the field is truly absent (None), not when it is + // present with an invalid encoding. + if req.to.is_none() { + Some(Address::ZERO) + } else { + None + } + })?; + + let from = match req.from.as_ref() { + Some(f) => Some(proto_to_address(f)?), + None => None, + }; + + Some(RpcCallRequest { + from, + to, gas: req.gas.map(U64::from), gas_price: req.gas_price.as_ref().map(proto_to_u256), max_fee_per_gas: req.max_fee_per_gas.as_ref().map(proto_to_u256), @@ -332,7 +350,7 @@ pub fn proto_to_call_request(req: &proto::CallRequest) -> RpcCallRequest { value: req.value.as_ref().map(proto_to_u256), data: req.data.as_ref().map(|d| Bytes::copy_from_slice(d)), input: None, - } + }) } // ============================================================================ diff --git a/crates/rpc/grpc/src/services/execution.rs b/crates/rpc/grpc/src/services/execution.rs index 997d8b7..04fad09 100644 --- a/crates/rpc/grpc/src/services/execution.rs +++ b/crates/rpc/grpc/src/services/execution.rs @@ -344,8 +344,10 @@ impl ExecutionService for ExecutionServiceImpl { let call_request = req .request .as_ref() - .map(proto_to_call_request) - .ok_or_else(|| GrpcError::InvalidArgument("Missing call request".to_string()))?; + .and_then(proto_to_call_request) + .ok_or_else(|| { + GrpcError::InvalidArgument("Missing or invalid call request".to_string()) + })?; let block_num = self .resolve_block(req.block.as_ref()) @@ -371,8 +373,10 @@ impl ExecutionService for ExecutionServiceImpl { let call_request = req .request .as_ref() - .map(proto_to_call_request) - .ok_or_else(|| GrpcError::InvalidArgument("Missing call request".to_string()))?; + .and_then(proto_to_call_request) + .ok_or_else(|| { + GrpcError::InvalidArgument("Missing or invalid call request".to_string()) + })?; let block_num = self .resolve_block(req.block.as_ref()) @@ -771,7 +775,7 @@ mod tests { .await .expect_err("missing call request should fail"); 
assert_eq!(err.code(), tonic::Code::InvalidArgument); - assert!(err.message().contains("Missing call request")); + assert!(err.message().contains("Missing or invalid call request")); } #[tokio::test] diff --git a/crates/rpc/types/Cargo.toml b/crates/rpc/types/Cargo.toml index cb3c421..9953673 100644 --- a/crates/rpc/types/Cargo.toml +++ b/crates/rpc/types/Cargo.toml @@ -18,6 +18,7 @@ sha2.workspace = true [dev-dependencies] proptest = "1.4" +evolve_testing = { workspace = true, features = ["proptest"] } [lints] workspace = true diff --git a/crates/rpc/types/src/lib.rs b/crates/rpc/types/src/lib.rs index d974ab7..3453121 100644 --- a/crates/rpc/types/src/lib.rs +++ b/crates/rpc/types/src/lib.rs @@ -197,6 +197,7 @@ mod tests { #[cfg(test)] mod proptests { use super::*; + use evolve_testing::proptest_config::proptest_config; use proptest::prelude::*; fn arb_account_id() -> impl Strategy { @@ -284,6 +285,8 @@ mod proptests { } proptest! { + #![proptest_config(proptest_config())] + #[test] fn prop_account_id_to_address_is_deterministic(id in arb_account_id()) { prop_assert_eq!(account_id_to_address(id), account_id_to_address(id)); diff --git a/crates/rpc/types/src/receipt.rs b/crates/rpc/types/src/receipt.rs index 993142c..bcdcf7c 100644 --- a/crates/rpc/types/src/receipt.rs +++ b/crates/rpc/types/src/receipt.rs @@ -42,6 +42,9 @@ pub struct RpcReceipt { pub tx_type: U64, /// Status (1 = success, 0 = failure) pub status: U64, + /// Revert reason for failed transactions. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub revert_reason: Option, } impl RpcReceipt { @@ -78,6 +81,7 @@ impl RpcReceipt { logs_bloom: Bytes::new(), tx_type: U64::ZERO, status: U64::from(Self::STATUS_SUCCESS), + revert_reason: None, } } @@ -108,6 +112,7 @@ impl RpcReceipt { logs_bloom: Bytes::new(), tx_type: U64::ZERO, status: U64::from(Self::STATUS_FAILURE), + revert_reason: None, } } diff --git a/crates/storage/src/block_store.rs b/crates/storage/src/block_store.rs index 9b29f99..b0a04b4 100644 --- a/crates/storage/src/block_store.rs +++ b/crates/storage/src/block_store.rs @@ -117,9 +117,14 @@ where key_partition: format!("{}-block-index", config.partition_prefix), key_page_cache, value_partition: format!("{}-block-data", config.partition_prefix), - // No compression by default. Blocks are often already compressed (gzip/zstd - // at the application layer), so double-compression wastes CPU. - compression: None, + // Zstd compression for block data. Borsh-encoded ArchivedBlocks contain + // repetitive structure (hashes, encoded txs) and compress well. + // Level 3 gives ~2x ratio with minimal CPU. 0 = disabled. + compression: if config.compression_level > 0 { + Some(config.compression_level) + } else { + None + }, // `bytes::Bytes` uses `RangeCfg` as its codec config. // An unbounded range accepts blocks of any size. codec_config: RangeCfg::from(..), diff --git a/crates/storage/src/cache.rs b/crates/storage/src/cache.rs index 69c6a4a..9c358f9 100644 --- a/crates/storage/src/cache.rs +++ b/crates/storage/src/cache.rs @@ -250,9 +250,16 @@ impl ShardedDbCache { /// Prefetches multiple keys in parallel by grouping them by shard. /// This is useful for cache warming before transaction execution. - pub fn prefetch_keys(&self, keys: &[Vec], fetch_fn: F) + /// + /// The fetch function must return `Ok(Some(v))` if the key exists, + /// `Ok(None)` if the key is confirmed absent, or `Err(_)` to skip + /// caching the key entirely (e.g. on transient I/O errors). 
+ /// + /// Using `Err` on error prevents a transient failure from incorrectly + /// populating a negative cache entry that would mask later successful reads. + pub fn prefetch_keys<F, E>(&self, keys: &[Vec<u8>], fetch_fn: F) where - F: Fn(&[u8]) -> Option<Vec<u8>> + Sync, + F: Fn(&[u8]) -> Result<Option<Vec<u8>>, E> + Sync, { // Group keys by shard to minimize lock acquisitions let mut shard_keys: [Vec<&Vec<u8>>; NUM_SHARDS] = std::array::from_fn(|_| Vec::new()); @@ -273,12 +280,13 @@ impl ShardedDbCache { continue; } - // Fetch from storage and cache + // Fetch from storage and cache; errors leave the key as a cache miss. match fetch_fn(key) { - Some(value) => { + Ok(Some(value)) => { self.shards[shard_idx].insert((*key).clone(), CachedValue::Present(value)) } - None => self.shards[shard_idx].insert((*key).clone(), CachedValue::Absent), + Ok(None) => self.shards[shard_idx].insert((*key).clone(), CachedValue::Absent), + Err(_) => {} // transient error — skip caching, will retry on next access } } } @@ -533,9 +541,11 @@ mod tests { b"\x00key3".to_vec(), // Same shard as key1 ]; - // Mock fetch function - cache.prefetch_keys(&keys, |key| { - Some(format!("value_for_{}", String::from_utf8_lossy(key)).into_bytes()) + // Mock fetch function — returns Ok(Some(_)) for all keys (infallible). + cache.prefetch_keys(&keys, |key| -> Result<Option<Vec<u8>>, ()> { + Ok(Some( + format!("value_for_{}", String::from_utf8_lossy(key)).into_bytes(), + )) }); // All keys should be cached diff --git a/crates/storage/src/qmdb_impl.rs b/crates/storage/src/qmdb_impl.rs index b37ef6d..16090cb 100644 --- a/crates/storage/src/qmdb_impl.rs +++ b/crates/storage/src/qmdb_impl.rs @@ -446,11 +446,16 @@ where merkleized.finalize() }; db.apply_batch(changeset).await.map_err(map_qmdb_error)?; - } - for key in keys_to_invalidate { - self.cache.invalidate(&key); + // Invalidate cache entries while still holding the write lock so that + // no concurrent reader can observe a stale cached value between the + // QMDB commit and the invalidation (M-1 fix).
+ for key in &keys_to_invalidate { + self.cache.invalidate(key); + } } + // Drop keys_to_invalidate explicitly; the loop above only borrowed it, so the Vec stays alive until this point. + drop(keys_to_invalidate); self.metrics .record_batch(start.elapsed().as_secs_f64(), ops_count, sets, deletes); @@ -483,16 +488,10 @@ where result: Result<Option<Vec<u8>>, impl std::fmt::Display>, ) -> Result<Option<Vec<u8>>, ErrorCode> { match result { - Ok(Some(value)) => Ok(Some(value)), - Ok(None) => Ok(None), + Ok(value) => Ok(value), Err(e) => { - let err_str = e.to_string(); - if err_str.contains("not found") { - Ok(None) - } else { - tracing::error!("QMDB read error: {err_str}"); - Err(crate::types::ERR_ADB_ERROR) - } + tracing::error!("QMDB read error: {e}"); + Err(crate::types::ERR_ADB_ERROR) } } } diff --git a/crates/storage/src/types.rs b/crates/storage/src/types.rs index 8ff405c..9b78771 100644 --- a/crates/storage/src/types.rs +++ b/crates/storage/src/types.rs @@ -41,6 +41,23 @@ pub struct BlockStorageConfig { /// /// Default: 4096 bytes pub replay_buffer: usize, + + /// Zstd compression level for block data (1-21). + /// + /// Borsh-encoded blocks compress well due to repetitive hash structures. + /// Level 3 gives ~2x compression with minimal CPU overhead. + /// Set to 0 to disable compression. + /// Default: 3 + pub compression_level: u8, + + /// Number of recent blocks to retain before pruning. + /// + /// Pruning happens at section granularity (`blocks_per_section`), so the + /// actual retention may be slightly higher. Set to 0 to disable pruning + /// (keep all blocks).
+ /// + /// Default: 100_000 (~1 day at 1 block/sec) + pub retention_blocks: u64, } impl Default for BlockStorageConfig { @@ -51,6 +68,8 @@ impl Default for BlockStorageConfig { key_write_buffer: 1024 * 1024, // 1MB value_write_buffer: 4 * 1024 * 1024, // 4MB replay_buffer: 4096, + compression_level: 3, + retention_blocks: 100_000, } } } diff --git a/crates/storage/src/warming.rs b/crates/storage/src/warming.rs index 2154753..0f2a0f7 100644 --- a/crates/storage/src/warming.rs +++ b/crates/storage/src/warming.rs @@ -76,11 +76,10 @@ where return; } - // Prefetch into cache - cache.prefetch_keys(&all_keys, |key| { - // Fetch from storage, ignoring errors (treat as absent) - storage.get(key).ok().flatten() - }); + // Prefetch into cache. + // Pass storage errors as Err so prefetch_keys does not cache a false-absent + // entry for a key that failed due to a transient I/O problem (M-3 fix). + cache.prefetch_keys(&all_keys, |key| storage.get(key)); } /// Warms the cache asynchronously for a batch of transactions. @@ -326,4 +325,44 @@ mod tests { let keys = token_transfer_keys(token, sender, recipient); assert_eq!(keys.len(), 3); } + + /// Storage that returns an error for one specific key. + struct FaultingStorage { + inner: MockStorage, + fault_key: Vec<u8>, + } + + impl ReadonlyKV for FaultingStorage { + fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, ErrorCode> { + if key == self.fault_key.as_slice() { + Err(ErrorCode::new(0xFF)) + } else { + self.inner.get(key) + } + } + } + + /// Regression test for M-3: a storage error during cache warming must not + /// populate a false-absent cache entry that silences future successful reads.
+ #[test] + fn test_warm_cache_storage_error_does_not_cache_absent() { + let cache = ShardedDbCache::with_defaults(); + let fault_key = b"key_error".to_vec(); + let storage = FaultingStorage { + inner: MockStorage::new(), + fault_key: fault_key.clone(), + }; + + let txs = vec![TestTx { + keys: vec![fault_key.clone()], + }]; + + warm_cache(&cache, &storage, &txs, &TestPredictor); + + // The faulting key must remain a cache miss, not a false Absent entry. + assert!( + cache.get(&fault_key).is_none(), + "a transient storage error must not populate a false-absent cache entry" + ); + } } diff --git a/crates/testing/simulator/src/eth_eoa.rs b/crates/testing/simulator/src/eth_eoa.rs index aa661e5..cb17430 100644 --- a/crates/testing/simulator/src/eth_eoa.rs +++ b/crates/testing/simulator/src/eth_eoa.rs @@ -1,5 +1,8 @@ use crate::Simulator; -use evolve_core::{runtime_api::ACCOUNT_IDENTIFIER_PREFIX, AccountId, ErrorCode, Message}; +use evolve_core::{ + runtime_api::{ACCOUNT_IDENTIFIER_PREFIX, ACCOUNT_STORAGE_PREFIX}, + AccountId, ErrorCode, Message, +}; use evolve_stf_traits::StateChange; use k256::ecdsa::SigningKey; @@ -33,7 +36,7 @@ pub fn init_eth_eoa_storage( account_id: AccountId, eth_address: [u8; 20], ) -> Result<(), ErrorCode> { - let mut nonce_key = Vec::with_capacity(account_id.as_bytes().len() + 1); + let mut nonce_key = vec![ACCOUNT_STORAGE_PREFIX]; nonce_key.extend_from_slice(&account_id.as_bytes()); nonce_key.push(0u8); let nonce_value = Message::new(&0u64) .expect("nonce message") .into_bytes() .expect("nonce bytes"); - let mut addr_key = Vec::with_capacity(account_id.as_bytes().len() + 1); + let mut addr_key = vec![ACCOUNT_STORAGE_PREFIX]; addr_key.extend_from_slice(&account_id.as_bytes()); addr_key.push(1u8); let addr_value = Message::new(&eth_address)