Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
f8f3c8f
feat: add DA inclusion status tracking from ev-node
pthmas Mar 13, 2026
8b71f59
fix: default evnode client to JSON mode
pthmas Mar 13, 2026
c82597f
feat: show DA status in blocks list and simplify detail page display
pthmas Mar 15, 2026
d8e77fe
feat: add live DA status SSE, optimize DA worker performance
pthmas Mar 15, 2026
acb8217
refactor: simplify ev-node client to JSON-only
pthmas Mar 15, 2026
6a67158
chore: remove bloat serialization field-presence tests
pthmas Mar 15, 2026
cf12d18
chore: remove SSE serialization tests
pthmas Mar 15, 2026
3e4980f
fix: resolve CI lint and format failures
pthmas Mar 15, 2026
905e1ec
fix: restore setTick useState declaration removed by linter
pthmas Mar 15, 2026
3ae6ae7
Fix DA batch handling on block detail page
pthmas Mar 15, 2026
e68246f
Fix frontend lint failure on block detail page
pthmas Mar 16, 2026
49c0f2d
feat: merge atlas server architecture and DA tracking updates
pthmas Mar 18, 2026
04a7449
Merge remote-tracking branch 'origin/main' into pierrick/da-inclusion
pthmas Mar 18, 2026
e409d9b
refactor: send DA SSE payloads directly from DA worker
pthmas Mar 18, 2026
21a4d0b
fix: use axum 0.8-compatible SSE return type
pthmas Mar 18, 2026
2894a34
fix: reduce blocks page size and DA glow transitions
pthmas Mar 18, 2026
be051a6
Add explicit DA tracking controls
pthmas Mar 18, 2026
856a617
Fix local quality issues
pthmas Mar 18, 2026
0870e1d
Address DA review follow-ups
pthmas Mar 18, 2026
d401b25
Merge origin/main into pierrick/da-inclusion
pthmas Mar 18, 2026
09897fe
Fix DA worker and buffered SSE updates
pthmas Mar 18, 2026
ae1f38c
Clarify explicit DA tracking enablement
pthmas Mar 18, 2026
8f65ade
Merge origin/main into pierrick/da-inclusion
pthmas Mar 18, 2026
c8fce71
Merge origin/main into pierrick/da-inclusion
pthmas Mar 19, 2026
fadeab2
Prevent stale DA override sync frames
pthmas Mar 19, 2026
fcfa6ba
Merge origin/main into pierrick/da-inclusion
pthmas Mar 19, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,18 @@ RPC_BATCH_SIZE=20
# API_DB_MAX_CONNECTIONS=20
# SSE_REPLAY_BUFFER_BLOCKS=4096 # replay tail used only for active connected clients

# Optional: enable DA (Data Availability) inclusion tracking from ev-node.
# Set this to true only when you also provide EVNODE_URL below.
ENABLE_DA_TRACKING=false

# Required when ENABLE_DA_TRACKING=true.
# Must be reachable from the atlas-server process/container.
# EVNODE_URL=http://<ev-node-host-reachable-from-atlas-server>:7331

# Optional when ENABLE_DA_TRACKING=true.
# DA_RPC_REQUESTS_PER_SECOND=50
# DA_WORKER_CONCURRENCY=50

# Branding / white-label (all optional)
# CHAIN_LOGO_URL= # URL or path to logo (e.g., /branding/logo.svg). Default: bundled logo
# ACCENT_COLOR= # Primary accent hex (e.g. #3b82f6). Default: #dc2626 (red)
Expand Down
25 changes: 18 additions & 7 deletions CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ let cursor = (total_count - 1) - (pagination.page.saturating_sub(1) as i64) * li
### Row count estimation
For large tables (transactions, addresses), use `pg_class.reltuples` instead of `COUNT(*)`:
```rust
// handlers/mod.rs — get_table_count(pool)
// handlers/mod.rs — get_table_count(pool, table_name)
// Partition-aware: sums child reltuples, falls back to parent
// For tables < 100k rows: falls back to exact COUNT(*)
```
Expand All @@ -77,19 +77,25 @@ For large tables (transactions, addresses), use `pg_class.reltuples` instead of
### AppState (API)
```rust
pub struct AppState {
pub pool: PgPool, // API pool only
pub block_events_tx: broadcast::Sender<()>, // shared with indexer
pub pool: PgPool, // API pool only
pub block_events_tx: broadcast::Sender<()>, // shared with indexer
pub da_events_tx: broadcast::Sender<Vec<DaSseUpdate>>, // shared with DA worker
pub head_tracker: Arc<HeadTracker>,
pub rpc_url: String,
pub solc_path: String,
pub admin_api_key: Option<String>,
pub da_tracking_enabled: bool,
pub chain_id: u64,
pub chain_name: String,
}
```

### DA tracking (optional)
When `ENABLE_DA_TRACKING=true`, a background DA worker queries ev-node for Celestia inclusion heights per block. `EVNODE_URL` is required only in that mode. Updates are pushed to SSE clients via an in-process `broadcast::Sender<Vec<DaSseUpdate>>`. The SSE handler streams `da_batch` events for incremental updates and emits `da_resync` when a client falls behind and should refetch visible DA state.

### Frontend API client
- Base URL: `/api` (proxied by nginx to `atlas-server:3000`)
- Fast polling endpoint: `GET /api/height` → `{ block_height, indexed_at }` — single key-value lookup from `indexer_state`, sub-ms. Used by the navbar as a polling fallback when SSE is disconnected.
- Fast polling endpoint: `GET /api/height` → `{ block_height, indexed_at, features: { da_tracking } }` — serves from `head_tracker` first and falls back to `indexer_state` when the in-memory head is empty. Used by the navbar as a polling fallback when SSE is disconnected and by feature-flag consumers.
- Chain status: `GET /api/status` → `{ chain_id, chain_name, block_height, total_transactions, total_addresses, indexed_at }` — full chain info, fetched once on page load.
- `GET /api/events` → SSE stream of `new_block` events, one per block in order. Primary live-update path for navbar counter and blocks page. Falls back to `/api/height` polling on disconnect.
- `GET /api/events` → SSE stream of `new_block`, `da_batch`, and `da_resync` events. Primary live-update path for navbar counter, blocks page, block detail DA status, and DA resync handling. Falls back to `/api/height` polling on disconnect.

## Important Conventions

Expand All @@ -109,13 +115,18 @@ Key vars (see `.env.example` for full list):
|---|---|---|
| `DATABASE_URL` | all | required |
| `RPC_URL` | server | required |
| `CHAIN_NAME` | server | `"Unknown"` |
| `DB_MAX_CONNECTIONS` | indexer pool | `20` |
| `API_DB_MAX_CONNECTIONS` | API pool | `20` |
| `BATCH_SIZE` | indexer | `100` |
| `FETCH_WORKERS` | indexer | `10` |
| `ADMIN_API_KEY` | API | none |
| `API_HOST` | API | `127.0.0.1` |
| `API_PORT` | API | `3000` |
| `ENABLE_DA_TRACKING` | server | `false` |
| `EVNODE_URL` | server | none |
| `DA_RPC_REQUESTS_PER_SECOND` | DA worker | `50` |
| `DA_WORKER_CONCURRENCY` | DA worker | `50` |

## Running Locally

Expand Down
12 changes: 12 additions & 0 deletions backend/crates/atlas-common/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,18 @@ pub struct Block {
pub indexed_at: DateTime<Utc>,
}

/// DA (Data Availability) status for a block on L2 chains using Celestia.
/// Only populated when DA tracking is enabled and the DA worker has checked the block.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct BlockDaStatus {
    // L2 block number this DA record belongs to.
    pub block_number: i64,
    /// Celestia height where the block header was submitted. 0 = pending.
    pub header_da_height: i64,
    /// Celestia height where the block data was submitted. 0 = pending.
    pub data_da_height: i64,
    // When the DA worker last updated this row.
    pub updated_at: DateTime<Utc>,
}

/// Transaction data as stored in the database
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Transaction {
Expand Down
58 changes: 50 additions & 8 deletions backend/crates/atlas-server/src/api/handlers/blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,27 @@ use axum::{
extract::{Path, Query, State},
Json,
};
use serde::Serialize;
use std::sync::Arc;

use crate::api::error::ApiResult;
use crate::api::AppState;
use atlas_common::{AtlasError, Block, PaginatedResponse, Pagination, Transaction};
use atlas_common::{AtlasError, Block, BlockDaStatus, PaginatedResponse, Pagination, Transaction};

/// Block response with optional DA status.
/// DA fields are always present in the JSON (null when no data),
/// so the frontend can rely on a stable schema.
#[derive(Serialize)]
pub struct BlockResponse {
    // Core block record, flattened so its fields appear at the top level
    // of the serialized JSON object rather than nested under "block".
    #[serde(flatten)]
    pub block: Block,
    // DA inclusion info; serialized as null when the DA worker has not
    // recorded a row for this block (e.g. DA tracking disabled).
    pub da_status: Option<BlockDaStatus>,
}

pub async fn list_blocks(
State(state): State<Arc<AppState>>,
Query(pagination): Query<Pagination>,
) -> ApiResult<Json<PaginatedResponse<Block>>> {
) -> ApiResult<Json<PaginatedResponse<BlockResponse>>> {
// Use MAX(number) + 1 instead of COUNT(*) - blocks are sequential so this is accurate
// This is ~6500x faster than COUNT(*) on large tables
let total: (Option<i64>,) = sqlx::query_as("SELECT MAX(number) + 1 FROM blocks")
Expand All @@ -30,15 +41,37 @@ pub async fn list_blocks(
FROM blocks
WHERE number <= $2
ORDER BY number DESC
LIMIT $1"
LIMIT $1",
)
.bind(limit)
.bind(cursor)
.fetch_all(&state.pool)
.await?;

// Batch-fetch DA status for all blocks in this page
let block_numbers: Vec<i64> = blocks.iter().map(|b| b.number).collect();
let da_rows: Vec<BlockDaStatus> = sqlx::query_as(
"SELECT block_number, header_da_height, data_da_height, updated_at
FROM block_da_status
WHERE block_number = ANY($1)",
)
.bind(&block_numbers)
.fetch_all(&state.pool)
.await?;

let da_map: std::collections::HashMap<i64, BlockDaStatus> =
da_rows.into_iter().map(|d| (d.block_number, d)).collect();

let responses: Vec<BlockResponse> = blocks
.into_iter()
.map(|block| {
let da_status = da_map.get(&block.number).cloned();
BlockResponse { block, da_status }
})
.collect();

Ok(Json(PaginatedResponse::new(
blocks,
responses,
pagination.page,
pagination.limit,
total_count,
Expand All @@ -48,18 +81,27 @@ pub async fn list_blocks(
pub async fn get_block(
State(state): State<Arc<AppState>>,
Path(number): Path<i64>,
) -> ApiResult<Json<Block>> {
) -> ApiResult<Json<BlockResponse>> {
let block: Block = sqlx::query_as(
"SELECT number, hash, parent_hash, timestamp, gas_used, gas_limit, transaction_count, indexed_at
FROM blocks
WHERE number = $1"
WHERE number = $1",
)
.bind(number)
.fetch_optional(&state.pool)
.await?
.ok_or_else(|| AtlasError::NotFound(format!("Block {} not found", number)))?;

Ok(Json(block))
let da_status: Option<BlockDaStatus> = sqlx::query_as(
"SELECT block_number, header_da_height, data_da_height, updated_at
FROM block_da_status
WHERE block_number = $1",
)
.bind(number)
.fetch_optional(&state.pool)
.await?;

Ok(Json(BlockResponse { block, da_status }))
}

pub async fn get_block_transactions(
Expand All @@ -77,7 +119,7 @@ pub async fn get_block_transactions(
FROM transactions
WHERE block_number = $1
ORDER BY block_index ASC
LIMIT $2 OFFSET $3"
LIMIT $2 OFFSET $3",
)
.bind(number)
.bind(pagination.limit())
Expand Down
3 changes: 3 additions & 0 deletions backend/crates/atlas-server/src/api/handlers/faucet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -161,11 +161,14 @@ mod tests {
.expect("lazy pool");
let head_tracker = Arc::new(crate::head::HeadTracker::empty(10));
let (tx, _) = broadcast::channel(1);
let (da_tx, _) = broadcast::channel(1);
Arc::new(AppState {
pool,
block_events_tx: tx,
da_events_tx: da_tx,
head_tracker,
rpc_url: String::new(),
da_tracking_enabled: false,
faucet,
chain_id: 1,
chain_name: "Test Chain".to_string(),
Expand Down
Loading
Loading