From d8995bfe36cec37d0d3bb703aa2926404aafcc76 Mon Sep 17 00:00:00 2001
From: YoVinchen
Date: Sun, 10 Aug 2025 02:46:52 +0800
Subject: [PATCH] Modify calculation rules and database
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src-tauri/src/commands/mod.rs         |   1 +
 src-tauri/src/commands/usage.rs       | 180 ++++--
 src-tauri/src/commands/usage_cache.rs | 571 ++++++++++++++++++++++++++
 src-tauri/src/main.rs                 |   9 +
 src/components/UsageDashboard.tsx     |  45 +-
 src/lib/api.ts                        |  29 +-
 6 files changed, 733 insertions(+), 102 deletions(-)
 create mode 100644 src-tauri/src/commands/usage_cache.rs

diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs
index b1f7d31..f34f95d 100644
--- a/src-tauri/src/commands/mod.rs
+++ b/src-tauri/src/commands/mod.rs
@@ -3,6 +3,7 @@ pub mod claude;
 pub mod mcp;
 pub mod usage;
 pub mod usage_index;
+pub mod usage_cache;
 pub mod storage;
 pub mod slash_commands;
 pub mod proxy;
diff --git a/src-tauri/src/commands/usage.rs b/src-tauri/src/commands/usage.rs
index 75e08db..3c47441 100644
--- a/src-tauri/src/commands/usage.rs
+++ b/src-tauri/src/commands/usage.rs
@@ -8,97 +8,98 @@ use tauri::command;
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct UsageEntry {
-    timestamp: String,
-    model: String,
-    input_tokens: u64,
-    output_tokens: u64,
-    cache_creation_tokens: u64,
-    cache_read_tokens: u64,
-    cost: f64,
-    session_id: String,
-    project_path: String,
+    pub timestamp: String,
+    pub model: String,
+    pub input_tokens: u64,
+    pub output_tokens: u64,
+    pub cache_creation_tokens: u64,
+    pub cache_read_tokens: u64,
+    pub cost: f64,
+    pub session_id: String,
+    pub project_path: String,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct UsageStats {
-    total_cost: f64,
-    total_tokens: u64,
-    total_input_tokens: u64,
-    total_output_tokens: u64,
-    total_cache_creation_tokens: u64,
-    total_cache_read_tokens: u64,
-    total_sessions: u64,
-    by_model: Vec<ModelUsage>,
-    by_date: Vec<DailyUsage>,
-    by_project: Vec<ProjectUsage>,
+    pub total_cost: f64,
+    pub total_tokens: u64,
+    pub total_input_tokens: u64,
+    pub total_output_tokens: u64,
+    pub total_cache_creation_tokens: u64,
+    pub total_cache_read_tokens: u64,
+    pub total_sessions: u64,
+    pub by_model: Vec<ModelUsage>,
+    pub by_date: Vec<DailyUsage>,
+    pub by_project: Vec<ProjectUsage>,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ModelUsage {
-    model: String,
-    total_cost: f64,
-    total_tokens: u64,
-    input_tokens: u64,
-    output_tokens: u64,
-    cache_creation_tokens: u64,
-    cache_read_tokens: u64,
-    session_count: u64,
+    pub model: String,
+    pub total_cost: f64,
+    pub total_tokens: u64,
+    pub input_tokens: u64,
+    pub output_tokens: u64,
+    pub cache_creation_tokens: u64,
+    pub cache_read_tokens: u64,
+    pub session_count: u64,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DailyUsage {
-    date: String,
-    total_cost: f64,
-    total_tokens: u64,
+    pub date: String,
+    pub total_cost: f64,
+    pub total_tokens: u64,
     // New detailed per-day breakdowns
-    input_tokens: u64,
-    output_tokens: u64,
-    cache_creation_tokens: u64,
-    cache_read_tokens: u64,
-    request_count: u64,
-    models_used: Vec<String>,
+    pub input_tokens: u64,
+    pub output_tokens: u64,
+    pub cache_creation_tokens: u64,
+    pub cache_read_tokens: u64,
+    pub request_count: u64,
+    pub models_used: Vec<String>,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ProjectUsage {
-    project_path: String,
-    project_name: String,
-    total_cost: f64,
-    total_tokens: u64,
-    session_count: u64,
-    last_used: String,
+    pub project_path: String,
+    pub project_name: String,
+    pub total_cost: f64,
+    pub total_tokens: u64,
+    pub session_count: u64,
+    pub last_used: String,
 }
 
 // Claude pricing constants (per million tokens)
-// Claude 4 series
-const OPUS_4_INPUT_PRICE: f64 = 15.0;
-const OPUS_4_OUTPUT_PRICE: f64 = 75.0;
-const OPUS_4_CACHE_WRITE_PRICE: f64 = 18.75;
-const OPUS_4_CACHE_READ_PRICE: f64 = 1.20; // corrected to 1.20
+// Latest pricing table (2025-01)
+// Claude 4.x series
+const OPUS_4_1_INPUT_PRICE: f64 = 15.0; // Opus 4.1
+const OPUS_4_1_OUTPUT_PRICE: f64 = 75.0;
+const OPUS_4_1_CACHE_WRITE_PRICE: f64 = 18.75;
+const OPUS_4_1_CACHE_READ_PRICE: f64 = 1.50; // updated to 1.50
 
-const SONNET_4_INPUT_PRICE: f64 = 3.0;
+const SONNET_4_INPUT_PRICE: f64 = 3.0; // Sonnet 4
 const SONNET_4_OUTPUT_PRICE: f64 = 15.0;
 const SONNET_4_CACHE_WRITE_PRICE: f64 = 3.75;
 const SONNET_4_CACHE_READ_PRICE: f64 = 0.30;
 
-// Claude 3.x series
-// Sonnet 3.7/3.5
+// Claude 3.x series (legacy versions; prices may differ)
+// Sonnet 3.7/3.5 - assumed same as Sonnet 4
 const SONNET_3_INPUT_PRICE: f64 = 3.0;
 const SONNET_3_OUTPUT_PRICE: f64 = 15.0;
 const SONNET_3_CACHE_WRITE_PRICE: f64 = 3.75;
 const SONNET_3_CACHE_READ_PRICE: f64 = 0.30;
 
-// Opus 3
+// Opus 3 - assumed same as Opus 4.1
 const OPUS_3_INPUT_PRICE: f64 = 15.0;
 const OPUS_3_OUTPUT_PRICE: f64 = 75.0;
 const OPUS_3_CACHE_WRITE_PRICE: f64 = 18.75;
-const OPUS_3_CACHE_READ_PRICE: f64 = 1.20;
+const OPUS_3_CACHE_READ_PRICE: f64 = 1.50;
 
-// Haiku 3.5
-const HAIKU_3_INPUT_PRICE: f64 = 0.80;
-const HAIKU_3_OUTPUT_PRICE: f64 = 4.0;
-const HAIKU_3_CACHE_WRITE_PRICE: f64 = 1.0;
-const HAIKU_3_CACHE_READ_PRICE: f64 = 0.08;
+// Haiku 3.5 - best price/performance
+const HAIKU_3_5_INPUT_PRICE: f64 = 0.80;
+const HAIKU_3_5_OUTPUT_PRICE: f64 = 4.0;
+const HAIKU_3_5_CACHE_WRITE_PRICE: f64 = 1.0;
+const HAIKU_3_5_CACHE_READ_PRICE: f64 = 0.08;
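
All of these constants are USD per million tokens, so calculate_cost below reconstructs a request's cost as tokens / 1,000,000 × price, summed across the four token classes. A minimal sketch of that arithmetic (the token counts are invented for illustration):

    // Cost arithmetic sketch: cost = Σ tokens / 1_000_000 * price_per_million.
    fn example_sonnet_4_cost() -> f64 {
        let (input, output) = (12_000u64, 3_400u64);
        (input as f64 / 1_000_000.0) * SONNET_4_INPUT_PRICE      // 0.036
            + (output as f64 / 1_000_000.0) * SONNET_4_OUTPUT_PRICE // 0.051
    } // => 0.087 USD
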
 
 #[derive(Debug, Deserialize)]
 struct JsonlEntry {
@@ -147,40 +148,47 @@ fn calculate_cost(model: &str, usage: &UsageData) -> f64 {
     cost
 }
 
-// Standalone model-price matcher with more flexible model detection
+// Standalone model-price matcher with more precise model detection
 fn match_model_prices(model_lower: &str) -> (f64, f64, f64, f64) {
-    // Claude 4 series
-    if model_lower.contains("opus") && (model_lower.contains("4") || model_lower.contains("4.")) {
-        (OPUS_4_INPUT_PRICE, OPUS_4_OUTPUT_PRICE, OPUS_4_CACHE_WRITE_PRICE, OPUS_4_CACHE_READ_PRICE)
-    } else if model_lower.contains("sonnet") && (model_lower.contains("4") || model_lower.contains("4.")) {
+    // Claude Opus 4.1 (newest and most capable)
+    if model_lower.contains("opus") && (model_lower.contains("4-1") || model_lower.contains("4.1")) {
+        (OPUS_4_1_INPUT_PRICE, OPUS_4_1_OUTPUT_PRICE, OPUS_4_1_CACHE_WRITE_PRICE, OPUS_4_1_CACHE_READ_PRICE)
+    }
+    // Claude Sonnet 4
+    else if model_lower.contains("sonnet") && (model_lower.contains("-4-") || model_lower.contains("sonnet-4")) {
         (SONNET_4_INPUT_PRICE, SONNET_4_OUTPUT_PRICE, SONNET_4_CACHE_WRITE_PRICE, SONNET_4_CACHE_READ_PRICE)
     }
+    // Claude Haiku 3.5
+    else if model_lower.contains("haiku") {
+        (HAIKU_3_5_INPUT_PRICE, HAIKU_3_5_OUTPUT_PRICE, HAIKU_3_5_CACHE_WRITE_PRICE, HAIKU_3_5_CACHE_READ_PRICE)
+    }
     // Claude 3.x Sonnet series (3.7, 3.5)
     else if model_lower.contains("sonnet") &&
-        (model_lower.contains("3.7") || model_lower.contains("3.5") || model_lower.contains("3-5")) {
+        (model_lower.contains("3-7") || model_lower.contains("3.7") ||
+         model_lower.contains("3-5") || model_lower.contains("3.5")) {
         (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
     }
-    // Claude 3 Opus
-    else if model_lower.contains("opus") &&
-        (model_lower.contains("3") || (!model_lower.contains("4") && !model_lower.contains("4."))) {
+    // Claude 3 Opus (legacy)
+    else if model_lower.contains("opus") && model_lower.contains("3") {
         (OPUS_3_INPUT_PRICE, OPUS_3_OUTPUT_PRICE, OPUS_3_CACHE_WRITE_PRICE, OPUS_3_CACHE_READ_PRICE)
     }
-    // Claude 3.5 Haiku
-    else if model_lower.contains("haiku") {
-        (HAIKU_3_INPUT_PRICE, HAIKU_3_OUTPUT_PRICE, HAIKU_3_CACHE_WRITE_PRICE, HAIKU_3_CACHE_READ_PRICE)
-    }
-    // Default Sonnet (generic fallback)
+    // Default Sonnet (when the version is unspecified)
     else if model_lower.contains("sonnet") {
         (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
     }
+    // Default Opus (version unspecified; assume the latest)
+    else if model_lower.contains("opus") {
+        (OPUS_4_1_INPUT_PRICE, OPUS_4_1_OUTPUT_PRICE, OPUS_4_1_CACHE_WRITE_PRICE, OPUS_4_1_CACHE_READ_PRICE)
+    }
     // Unknown model
     else {
         log::warn!("Unknown model for cost calculation: {}", model_lower);
-        (0.0, 0.0, 0.0, 0.0)
+        // Fall back to Sonnet 3 pricing (conservative estimate)
+        (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
     }
 }
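
Because the matcher works on substrings of the lowercased model ID, branch order is load-bearing: Opus 4.1 must be tested before the generic Opus fallback, and Haiku before the 3.x Sonnet check. A regression sketch, using assumed-format model IDs (the IDs are illustrative, not taken from the patch):

    #[test]
    fn model_routing_examples() {
        // The input price (.0 of the tuple) identifies the branch taken.
        assert_eq!(match_model_prices("claude-opus-4-1-20250805").0, OPUS_4_1_INPUT_PRICE);
        assert_eq!(match_model_prices("claude-sonnet-4-20250514").0, SONNET_4_INPUT_PRICE);
        assert_eq!(match_model_prices("claude-3-5-haiku-20241022").0, HAIKU_3_5_INPUT_PRICE);
        assert_eq!(match_model_prices("claude-3-7-sonnet-20250219").0, SONNET_3_INPUT_PRICE);
        // Unknown IDs now price as Sonnet 3 instead of 0.0.
        assert_eq!(match_model_prices("some-future-model").0, SONNET_3_INPUT_PRICE);
    }
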
 
-fn parse_jsonl_file(
+pub fn parse_jsonl_file(
     path: &PathBuf,
     encoded_project_name: &str,
     processed_hashes: &mut HashSet<String>,
@@ -264,13 +272,25 @@ fn parse_jsonl_file(
                         continue;
                     }
 
-                    let cost = entry.cost_usd.unwrap_or_else(|| {
-                        if let Some(model_str) = &message.model {
-                            calculate_cost(model_str, usage)
-                        } else {
-                            0.0
+                    // Always recalculate the cost instead of trusting the costUSD field
+                    // in the JSONL, since prices may have changed or the stored value may be wrong
+                    let cost = if let Some(model_str) = &message.model {
+                        calculate_cost(model_str, usage)
+                    } else {
+                        0.0
+                    };
+
+                    // If the JSONL does carry a cost, log the difference for debugging
+                    if let Some(jsonl_cost) = entry.cost_usd {
+                        if (jsonl_cost - cost).abs() > 0.0001 {
+                            log::debug!(
+                                "Cost difference for model {}: JSONL={:.4}, Calculated={:.4}",
+                                message.model.as_ref().unwrap_or(&"unknown".to_string()),
+                                jsonl_cost,
+                                cost
+                            );
                         }
-                    });
+                    }
 
                     // Use actual project path if found, otherwise use encoded name
                     let project_path = actual_project_path
@@ -324,7 +344,7 @@ fn get_earliest_timestamp(path: &PathBuf) -> Option<String> {
     None
 }
 
-fn get_all_usage_entries(claude_path: &PathBuf) -> Vec<UsageEntry> {
+pub fn get_all_usage_entries(claude_path: &PathBuf) -> Vec<UsageEntry> {
     let mut all_entries = Vec::new();
     let mut processed_hashes = HashSet::new();
     let projects_dir = claude_path.join("projects");
diff --git a/src-tauri/src/commands/usage_cache.rs b/src-tauri/src/commands/usage_cache.rs
new file mode 100644
index 0000000..32c68f8
--- /dev/null
+++ b/src-tauri/src/commands/usage_cache.rs
@@ -0,0 +1,571 @@
+use chrono::{Local, Utc};
+use rusqlite::{params, Connection};
+use serde::{Deserialize, Serialize};
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use tauri::{command, State};
+use walkdir::WalkDir;
+
+use super::usage::{
+    UsageStats, ModelUsage, DailyUsage, ProjectUsage, UsageEntry,
+    parse_jsonl_file
+};
+
+#[derive(Default)]
+pub struct UsageCacheState {
+    pub conn: Arc<Mutex<Option<Connection>>>,
+    pub last_scan_time: Arc<Mutex<Option<i64>>>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ScanResult {
+    pub files_scanned: u32,
+    pub entries_added: u32,
+    pub entries_skipped: u32,
+    pub scan_time_ms: u64,
+}
+
+fn db_path() -> PathBuf {
+    dirs::home_dir()
+        .unwrap_or_else(|| PathBuf::from("."))
+        .join(".claudia/cache/usage_stats.sqlite")
+}
+
+fn ensure_parent_dir(p: &Path) -> std::io::Result<()> {
+    if let Some(dir) = p.parent() {
+        std::fs::create_dir_all(dir)?;
+    }
+    Ok(())
+}
+
+pub fn init_cache_db() -> rusqlite::Result<Connection> {
+    let path = db_path();
+    ensure_parent_dir(&path).map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?;
+
+    let conn = Connection::open(path)?;
+    conn.pragma_update(None, "journal_mode", &"WAL")?;
+
+    // Create schema
+    conn.execute_batch(
+        r#"
+        CREATE TABLE IF NOT EXISTS schema_version (version INTEGER PRIMARY KEY);
+        INSERT OR IGNORE INTO schema_version(version) VALUES (1);
+
+        -- File scan records
+        CREATE TABLE IF NOT EXISTS scanned_files (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            file_path TEXT NOT NULL UNIQUE,
+            file_size INTEGER NOT NULL,
+            mtime_ms INTEGER NOT NULL,
+            last_scanned_ms INTEGER NOT NULL,
+            entry_count INTEGER DEFAULT 0
+        );
+        CREATE INDEX IF NOT EXISTS idx_files_path ON scanned_files(file_path);
+
+        -- API usage records
+        CREATE TABLE IF NOT EXISTS usage_entries (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            timestamp TEXT NOT NULL,
+            model TEXT NOT NULL,
+            input_tokens INTEGER DEFAULT 0,
+            output_tokens INTEGER DEFAULT 0,
+            cache_creation_tokens INTEGER DEFAULT 0,
+            cache_read_tokens INTEGER DEFAULT 0,
+            cost REAL NOT NULL,
+            session_id TEXT NOT NULL,
+            project_path TEXT NOT NULL,
+            file_path TEXT NOT NULL,
+            unique_hash TEXT NOT NULL UNIQUE
+        );
+        CREATE INDEX IF NOT EXISTS idx_entries_timestamp ON usage_entries(timestamp);
+        CREATE INDEX IF NOT EXISTS idx_entries_project ON usage_entries(project_path);
+        CREATE INDEX IF NOT EXISTS idx_entries_hash ON usage_entries(unique_hash);
+        CREATE INDEX IF NOT EXISTS idx_entries_model ON usage_entries(model);
+        "#,
+    )?;
+
+    Ok(conn)
+}
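
Setting journal_mode=WAL up front matters here: the dashboard can keep reading cached stats while usage_scan_update writes inside its transaction. A quick way to confirm the pragma stuck (a sketch, not part of the patch):

    // Sketch: verify the journal mode actually took effect.
    fn assert_wal(conn: &rusqlite::Connection) -> rusqlite::Result<()> {
        let mode: String = conn.query_row("PRAGMA journal_mode", [], |r| r.get(0))?;
        assert_eq!(mode.to_lowercase(), "wal");
        Ok(())
    }
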
+
+fn get_file_mtime_ms(path: &Path) -> i64 {
+    fs::metadata(path)
+        .and_then(|m| m.modified())
+        .ok()
+        .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
+        .map(|d| d.as_millis() as i64)
+        .unwrap_or(0)
+}
+
+fn get_file_size(path: &Path) -> i64 {
+    fs::metadata(path)
+        .map(|m| m.len() as i64)
+        .unwrap_or(0)
+}
+
+fn generate_unique_hash(entry: &UsageEntry, has_io_tokens: bool, has_cache_tokens: bool) -> String {
+    if has_io_tokens {
+        // For I/O tokens: use session_id + timestamp + model
+        format!("io:{}:{}:{}", entry.session_id, entry.timestamp, entry.model)
+    } else if has_cache_tokens {
+        // For cache tokens: use timestamp + model + project
+        format!("cache:{}:{}:{}", entry.timestamp, entry.model, entry.project_path)
+    } else {
+        // Fallback
+        format!("other:{}:{}", entry.timestamp, entry.session_id)
+    }
+}
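
Despite the name, generate_unique_hash builds a readable composite key rather than a cryptographic hash: the io:/cache:/other: prefixes partition the key space, so a replayed API call (same session, timestamp, model) collapses to one row while cache-only rows, which lack a session-unique signal, dedupe on timestamp + model + project. A small property sketch (the helper is invented for illustration):

    // Sketch: keys derived from the same entry under the two shapes can
    // never collide, because each carries its own prefix.
    fn key_shapes_disjoint(entry: &UsageEntry) -> bool {
        let io_key = generate_unique_hash(entry, true, false);
        let cache_key = generate_unique_hash(entry, false, true);
        io_key.starts_with("io:") && cache_key.starts_with("cache:") && io_key != cache_key
    }
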
+
+#[command]
+pub async fn usage_scan_update(state: State<'_, UsageCacheState>) -> Result<ScanResult, String> {
+    let start_time = Utc::now().timestamp_millis();
+
+    // Initialize or get connection
+    let mut conn_guard = state.conn.lock().map_err(|e| e.to_string())?;
+    if conn_guard.is_none() {
+        *conn_guard = Some(init_cache_db().map_err(|e| e.to_string())?);
+    }
+    let conn = conn_guard.as_mut().unwrap();
+
+    let claude_path = dirs::home_dir()
+        .ok_or("Failed to get home directory")?
+        .join(".claude");
+
+    let projects_dir = claude_path.join("projects");
+
+    // Get existing scanned files from DB
+    let mut existing_files: HashMap<String, (i64, i64)> = HashMap::new();
+    {
+        let mut stmt = conn
+            .prepare("SELECT file_path, file_size, mtime_ms FROM scanned_files")
+            .map_err(|e| e.to_string())?;
+
+        let rows = stmt.query_map(params![], |row| {
+            Ok((
+                row.get::<_, String>(0)?,
+                (row.get::<_, i64>(1)?, row.get::<_, i64>(2)?),
+            ))
+        }).map_err(|e| e.to_string())?;
+
+        for row in rows {
+            if let Ok((path, data)) = row {
+                existing_files.insert(path, data);
+            }
+        }
+    }
+
+    // Find all .jsonl files
+    let mut files_to_process = Vec::new();
+    let mut all_current_files = HashSet::new();
+
+    if let Ok(projects) = fs::read_dir(&projects_dir) {
+        for project in projects.flatten() {
+            if project.file_type().map(|t| t.is_dir()).unwrap_or(false) {
+                let project_name = project.file_name().to_string_lossy().to_string();
+                let project_path = project.path();
+
+                WalkDir::new(&project_path)
+                    .into_iter()
+                    .filter_map(Result::ok)
+                    .filter(|e| e.path().extension().and_then(|s| s.to_str()) == Some("jsonl"))
+                    .for_each(|entry| {
+                        let path = entry.path().to_path_buf();
+                        let path_str = path.to_string_lossy().to_string();
+                        all_current_files.insert(path_str.clone());
+
+                        // Check if file needs processing
+                        let current_size = get_file_size(&path);
+                        let current_mtime = get_file_mtime_ms(&path);
+
+                        let needs_processing = if let Some((stored_size, stored_mtime)) = existing_files.get(&path_str) {
+                            current_size != *stored_size || current_mtime != *stored_mtime
+                        } else {
+                            true // New file
+                        };
+
+                        if needs_processing {
+                            files_to_process.push((path, project_name.clone()));
+                        }
+                    });
+            }
+        }
+    }
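
Change detection is deliberately cheap: a file is reparsed only when its (size, mtime) pair differs from the cached record, so an unchanged history costs one stat call per file. Restated as a standalone predicate (sketch):

    // Incremental-scan predicate: rescan only on a changed (size, mtime)
    // pair; comparing size as well catches a rewrite that keeps the mtime.
    fn needs_rescan(cached: Option<(i64, i64)>, current: (i64, i64)) -> bool {
        match cached {
            Some(stored) => stored != current, // changed on disk
            None => true,                      // never seen before
        }
    }
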
+
+    let mut files_scanned = 0u32;
+    let mut entries_added = 0u32;
+    let mut entries_skipped = 0u32;
+
+    // Process files that need updating
+    let tx = conn.transaction().map_err(|e| e.to_string())?;
+
+    for (file_path, project_name) in files_to_process {
+        let path_str = file_path.to_string_lossy().to_string();
+        let file_size = get_file_size(&file_path);
+        let mtime_ms = get_file_mtime_ms(&file_path);
+
+        // Parse the JSONL file and get entries
+        let mut processed_hashes = HashSet::new();
+        let entries = parse_jsonl_file(&file_path, &project_name, &mut processed_hashes);
+
+        // Insert or update file record
+        tx.execute(
+            "INSERT INTO scanned_files (file_path, file_size, mtime_ms, last_scanned_ms, entry_count)
+             VALUES (?1, ?2, ?3, ?4, ?5)
+             ON CONFLICT(file_path) DO UPDATE SET
+                 file_size = excluded.file_size,
+                 mtime_ms = excluded.mtime_ms,
+                 last_scanned_ms = excluded.last_scanned_ms,
+                 entry_count = excluded.entry_count",
+            params![path_str, file_size, mtime_ms, start_time, entries.len() as i64],
+        ).map_err(|e| e.to_string())?;
+
+        // Insert usage entries
+        for entry in entries {
+            let has_io_tokens = entry.input_tokens > 0 || entry.output_tokens > 0;
+            let has_cache_tokens = entry.cache_creation_tokens > 0 || entry.cache_read_tokens > 0;
+            let unique_hash = generate_unique_hash(&entry, has_io_tokens, has_cache_tokens);
+
+            let result = tx.execute(
+                "INSERT INTO usage_entries (
+                    timestamp, model, input_tokens, output_tokens,
+                    cache_creation_tokens, cache_read_tokens, cost,
+                    session_id, project_path, file_path, unique_hash
+                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)
+                ON CONFLICT(unique_hash) DO NOTHING",
+                params![
+                    entry.timestamp,
+                    entry.model,
+                    entry.input_tokens as i64,
+                    entry.output_tokens as i64,
+                    entry.cache_creation_tokens as i64,
+                    entry.cache_read_tokens as i64,
+                    entry.cost,
+                    entry.session_id,
+                    entry.project_path,
+                    path_str,
+                    unique_hash,
+                ],
+            );
+
+            match result {
+                Ok(n) if n > 0 => entries_added += 1,
+                _ => entries_skipped += 1,
+            }
+        }
+
+        files_scanned += 1;
+    }
+
+    // Remove entries for files that no longer exist
+    for (old_path, _) in existing_files {
+        if !all_current_files.contains(&old_path) {
+            tx.execute("DELETE FROM usage_entries WHERE file_path = ?1", params![old_path])
+                .map_err(|e| e.to_string())?;
+            tx.execute("DELETE FROM scanned_files WHERE file_path = ?1", params![old_path])
+                .map_err(|e| e.to_string())?;
+        }
+    }
+
+    tx.commit().map_err(|e| e.to_string())?;
+
+    // Update last scan time
+    let mut last_scan = state.last_scan_time.lock().map_err(|e| e.to_string())?;
+    *last_scan = Some(start_time);
+
+    let scan_time_ms = (Utc::now().timestamp_millis() - start_time) as u64;
+
+    Ok(ScanResult {
+        files_scanned,
+        entries_added,
+        entries_skipped,
+        scan_time_ms,
+    })
+}
+
+#[command]
+pub async fn usage_get_stats_cached(
+    days: Option<u32>,
+    state: State<'_, UsageCacheState>,
+) -> Result<UsageStats, String> {
+    // First ensure cache is up to date
+    usage_scan_update(state.clone()).await?;
+
+    let conn_guard = state.conn.lock().map_err(|e| e.to_string())?;
+    let conn = conn_guard.as_ref().ok_or("Database not initialized")?;
+
+    // Build date filter
+    let date_filter = if let Some(d) = days {
+        let cutoff = Local::now().naive_local().date() - chrono::Duration::days(d as i64);
+        Some(cutoff.format("%Y-%m-%d").to_string())
+    } else {
+        None
+    };
+
+    // Query total stats
+    let (total_cost, total_input, total_output, total_cache_creation, total_cache_read): (f64, i64, i64, i64, i64) =
+        if let Some(cutoff) = &date_filter {
+            conn.query_row(
+                "SELECT
+                    COALESCE(SUM(cost), 0.0),
+                    COALESCE(SUM(input_tokens), 0),
+                    COALESCE(SUM(output_tokens), 0),
+                    COALESCE(SUM(cache_creation_tokens), 0),
+                    COALESCE(SUM(cache_read_tokens), 0)
+                 FROM usage_entries
+                 WHERE timestamp >= ?1",
+                params![cutoff],
+                |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?)),
+            ).map_err(|e| e.to_string())?
+        } else {
+            conn.query_row(
+                "SELECT
+                    COALESCE(SUM(cost), 0.0),
+                    COALESCE(SUM(input_tokens), 0),
+                    COALESCE(SUM(output_tokens), 0),
+                    COALESCE(SUM(cache_creation_tokens), 0),
+                    COALESCE(SUM(cache_read_tokens), 0)
+                 FROM usage_entries",
+                params![],
+                |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?)),
+            ).map_err(|e| e.to_string())?
+        };
+
+    let total_tokens = total_input + total_output + total_cache_creation + total_cache_read;
+
+    // Get session count
+    let total_sessions: i64 = if let Some(cutoff) = &date_filter {
+        conn.query_row(
+            "SELECT COUNT(DISTINCT session_id) FROM usage_entries WHERE timestamp >= ?1",
+            params![cutoff],
+            |row| row.get(0),
+        ).map_err(|e| e.to_string())?
+    } else {
+        conn.query_row(
+            "SELECT COUNT(DISTINCT session_id) FROM usage_entries",
+            params![],
+            |row| row.get(0),
+        ).map_err(|e| e.to_string())?
+    };
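
Note the filter compares an RFC 3339 timestamp column against a bare %Y-%m-%d cutoff as TEXT. That works because ISO-8601 strings sort lexicographically in chronological order, assuming the JSONL timestamps are uniformly UTC-formatted:

    // Lexicographic order == chronological order for uniformly formatted
    // ISO-8601 strings, and a bare date sorts before any timestamp that day.
    fn iso_order_examples() {
        assert!("2025-08-03" < "2025-08-03T00:00:01Z");
        assert!("2025-08-03T09:00:00Z" < "2025-08-04T08:59:59Z");
    }
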
+
+    // Get stats by model
+    let mut by_model = Vec::new();
+    {
+        let query = if date_filter.is_some() {
+            "SELECT
+                model,
+                SUM(cost) as total_cost,
+                SUM(input_tokens) as input,
+                SUM(output_tokens) as output,
+                SUM(cache_creation_tokens) as cache_creation,
+                SUM(cache_read_tokens) as cache_read,
+                COUNT(DISTINCT session_id) as sessions
+             FROM usage_entries
+             WHERE timestamp >= ?1
+             GROUP BY model
+             ORDER BY total_cost DESC"
+        } else {
+            "SELECT
+                model,
+                SUM(cost) as total_cost,
+                SUM(input_tokens) as input,
+                SUM(output_tokens) as output,
+                SUM(cache_creation_tokens) as cache_creation,
+                SUM(cache_read_tokens) as cache_read,
+                COUNT(DISTINCT session_id) as sessions
+             FROM usage_entries
+             GROUP BY model
+             ORDER BY total_cost DESC"
+        };
+
+        let mut stmt = conn.prepare(query).map_err(|e| e.to_string())?;
+
+        // Create closure once to avoid type mismatch
+        let create_model_usage = |row: &rusqlite::Row| -> rusqlite::Result<ModelUsage> {
+            Ok(ModelUsage {
+                model: row.get(0)?,
+                total_cost: row.get(1)?,
+                input_tokens: row.get::<_, i64>(2)? as u64,
+                output_tokens: row.get::<_, i64>(3)? as u64,
+                cache_creation_tokens: row.get::<_, i64>(4)? as u64,
+                cache_read_tokens: row.get::<_, i64>(5)? as u64,
+                session_count: row.get::<_, i64>(6)? as u64,
+                total_tokens: 0, // Will calculate below
+            })
+        };
+
+        let rows = if let Some(cutoff) = &date_filter {
+            stmt.query_map(params![cutoff], create_model_usage).map_err(|e| e.to_string())?
+        } else {
+            stmt.query_map(params![], create_model_usage).map_err(|e| e.to_string())?
+        };
+
+        for row in rows {
+            if let Ok(mut usage) = row {
+                usage.total_tokens = usage.input_tokens + usage.output_tokens +
+                    usage.cache_creation_tokens + usage.cache_read_tokens;
+                by_model.push(usage);
+            }
+        }
+    }
+
+    // Get daily stats
+    let mut by_date = Vec::new();
+    {
+        let query = if date_filter.is_some() {
+            "SELECT
+                DATE(timestamp) as date,
+                SUM(cost) as total_cost,
+                SUM(input_tokens) as input,
+                SUM(output_tokens) as output,
+                SUM(cache_creation_tokens) as cache_creation,
+                SUM(cache_read_tokens) as cache_read,
+                COUNT(DISTINCT session_id) as sessions,
+                COUNT(*) as requests,
+                GROUP_CONCAT(DISTINCT model) as models
+             FROM usage_entries
+             WHERE timestamp >= ?1
+             GROUP BY DATE(timestamp)
+             ORDER BY date DESC"
+        } else {
+            "SELECT
+                DATE(timestamp) as date,
+                SUM(cost) as total_cost,
+                SUM(input_tokens) as input,
+                SUM(output_tokens) as output,
+                SUM(cache_creation_tokens) as cache_creation,
+                SUM(cache_read_tokens) as cache_read,
+                COUNT(DISTINCT session_id) as sessions,
+                COUNT(*) as requests,
+                GROUP_CONCAT(DISTINCT model) as models
+             FROM usage_entries
+             GROUP BY DATE(timestamp)
+             ORDER BY date DESC"
+        };
+
+        let mut stmt = conn.prepare(query).map_err(|e| e.to_string())?;
+
+        // Create closure once to avoid type mismatch
+        let create_daily_usage = |row: &rusqlite::Row| -> rusqlite::Result<DailyUsage> {
+            let models_str: String = row.get(8)?;
+            let models_used: Vec<String> = models_str.split(',').map(|s| s.to_string()).collect();
+
+            Ok(DailyUsage {
+                date: row.get(0)?,
+                total_cost: row.get(1)?,
+                total_tokens: (row.get::<_, i64>(2)? + row.get::<_, i64>(3)? +
+                    row.get::<_, i64>(4)? + row.get::<_, i64>(5)?) as u64,
+                input_tokens: row.get::<_, i64>(2)? as u64,
+                output_tokens: row.get::<_, i64>(3)? as u64,
+                cache_creation_tokens: row.get::<_, i64>(4)? as u64,
+                cache_read_tokens: row.get::<_, i64>(5)? as u64,
+                request_count: row.get::<_, i64>(7)? as u64,
+                models_used,
+            })
+        };
+
+        let rows = if let Some(cutoff) = &date_filter {
+            stmt.query_map(params![cutoff], create_daily_usage).map_err(|e| e.to_string())?
+        } else {
+            stmt.query_map(params![], create_daily_usage).map_err(|e| e.to_string())?
+        };
+
+        for row in rows {
+            if let Ok(daily) = row {
+                by_date.push(daily);
+            }
+        }
+    }
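
One subtlety in the daily buckets: SQLite's DATE() truncates an ISO-8601 timestamp to its UTC day, while the days cutoff above is computed from Local::now(), so around midnight the two can disagree by up to the local UTC offset. A sketch of the bucketing behavior this relies on:

    // SQLite's DATE() on an ISO-8601 string yields the calendar day,
    // which is what GROUP BY DATE(timestamp) buckets on.
    fn utc_day_bucket(conn: &rusqlite::Connection) -> rusqlite::Result<String> {
        conn.query_row("SELECT DATE('2025-08-09T23:30:00Z')", [], |r| r.get(0))
        // -> "2025-08-09"
    }
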
+
+    // Get project stats
+    let mut by_project = Vec::new();
+    {
+        let query = if date_filter.is_some() {
+            "SELECT
+                project_path,
+                SUM(cost) as total_cost,
+                SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) as total_tokens,
+                COUNT(DISTINCT session_id) as sessions,
+                MAX(timestamp) as last_used
+             FROM usage_entries
+             WHERE timestamp >= ?1
+             GROUP BY project_path
+             ORDER BY total_cost DESC"
+        } else {
+            "SELECT
+                project_path,
+                SUM(cost) as total_cost,
+                SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) as total_tokens,
+                COUNT(DISTINCT session_id) as sessions,
+                MAX(timestamp) as last_used
+             FROM usage_entries
+             GROUP BY project_path
+             ORDER BY total_cost DESC"
+        };
+
+        let mut stmt = conn.prepare(query).map_err(|e| e.to_string())?;
+
+        // Create closure once to avoid type mismatch
+        let create_project_usage = |row: &rusqlite::Row| -> rusqlite::Result<ProjectUsage> {
+            Ok(ProjectUsage {
+                project_path: row.get(0)?,
+                project_name: String::new(), // Will be extracted from path
+                total_cost: row.get(1)?,
+                total_tokens: row.get::<_, i64>(2)? as u64,
+                session_count: row.get::<_, i64>(3)? as u64,
+                last_used: row.get(4)?,
+            })
+        };
+
+        let rows = if let Some(cutoff) = &date_filter {
+            stmt.query_map(params![cutoff], create_project_usage).map_err(|e| e.to_string())?
+        } else {
+            stmt.query_map(params![], create_project_usage).map_err(|e| e.to_string())?
+        };
+
+        for row in rows {
+            if let Ok(mut project) = row {
+                // Extract project name from path
+                project.project_name = project.project_path
+                    .split('/')
+                    .last()
+                    .unwrap_or(&project.project_path)
+                    .to_string();
+                by_project.push(project);
+            }
+        }
+    }
+
+    Ok(UsageStats {
+        total_cost,
+        total_tokens: total_tokens as u64,
+        total_input_tokens: total_input as u64,
+        total_output_tokens: total_output as u64,
+        total_cache_creation_tokens: total_cache_creation as u64,
+        total_cache_read_tokens: total_cache_read as u64,
+        total_sessions: total_sessions as u64,
+        by_model,
+        by_date,
+        by_project,
+    })
+}
+
+#[command]
+pub async fn usage_clear_cache(state: State<'_, UsageCacheState>) -> Result<String, String> {
+    let mut conn_guard = state.conn.lock().map_err(|e| e.to_string())?;
+
+    if let Some(conn) = conn_guard.as_mut() {
+        conn.execute("DELETE FROM usage_entries", params![])
+            .map_err(|e| e.to_string())?;
+        conn.execute("DELETE FROM scanned_files", params![])
+            .map_err(|e| e.to_string())?;
+
+        // Reset the last scan time
+        let mut last_scan = state.last_scan_time.lock().map_err(|e| e.to_string())?;
+        *last_scan = None;
+
+        return Ok("Cache cleared successfully. All costs will be recalculated.".to_string());
+    }
+
+    Ok("No cache to clear.".to_string())
+}
\ No newline at end of file
diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs
index b395c61..0ea1379 100644
--- a/src-tauri/src/main.rs
+++ b/src-tauri/src/main.rs
@@ -43,6 +43,9 @@ use commands::usage::{
 use commands::usage_index::{
     usage_get_summary, usage_import_diffs, usage_scan_index, usage_scan_progress, UsageIndexState,
 };
+use commands::usage_cache::{
+    usage_scan_update, usage_get_stats_cached, usage_clear_cache, UsageCacheState,
+};
 use commands::storage::{
     storage_list_tables, storage_read_table, storage_update_row, storage_delete_row,
     storage_insert_row, storage_execute_sql, storage_reset_database,
@@ -165,6 +168,7 @@ fn main() {
 
             // Initialize Usage Index state
             app.manage(UsageIndexState::default());
+            app.manage(UsageCacheState::default());
 
             Ok(())
         })
@@ -253,6 +257,11 @@ fn main() {
             usage_get_summary,
             usage_import_diffs,
 
+            // Usage Cache Management
+            usage_scan_update,
+            usage_get_stats_cached,
+            usage_clear_cache,
+
             // MCP (Model Context Protocol)
             mcp_add,
             mcp_list,
diff --git a/src/components/UsageDashboard.tsx b/src/components/UsageDashboard.tsx
index 77ab05a..808a206 100644
--- a/src/components/UsageDashboard.tsx
+++ b/src/components/UsageDashboard.tsx
@@ -74,9 +74,14 @@ export const UsageDashboard: React.FC<UsageDashboardProps> = ({ onBack }) => {
         statsData = await api.getUsageStats();
         sessionData = await api.getSessionStats();
       } else {
+        const days = selectedDateRange === "7d" ? 7 : 30;
+
+        // Use the cached stats API, passing the number of days
+        statsData = await api.getUsageStats(days);
+
+        // For session data, keep using the date-range approach
         const endDate = new Date();
         const startDate = new Date();
-        const days = selectedDateRange === "7d" ? 7 : 30;
         startDate.setDate(startDate.getDate() - days);
 
         const formatDateForApi = (date: Date) => {
@@ -86,10 +91,6 @@ export const UsageDashboard: React.FC<UsageDashboardProps> = ({ onBack }) => {
           return `${year}${month}${day}`;
         }
 
-        statsData = await api.getUsageByDateRange(
-          startDate.toISOString(),
-          endDate.toISOString()
-        );
         sessionData = await api.getSessionStats(
           formatDateForApi(startDate),
           formatDateForApi(endDate),
@@ -774,10 +775,8 @@ export const UsageDashboard: React.FC<UsageDashboardProps> = ({ onBack }) => {
                     <BarChart
                       data={stats.by_project.map((project) => ({
                         name: project.project_path.split('/').slice(-1)[0],
-                        input: project.input_tokens / 1000,
-                        output: project.output_tokens / 1000,
-                        cacheWrite: project.cache_creation_tokens / 1000,
-                        cacheRead: project.cache_read_tokens / 1000
+                        totalTokens: project.total_tokens / 1000,
+                        cost: project.total_cost
                       }))}
                       margin={{ top: 5, right: 30, left: 20, bottom: 60 }}
                     >
@@ -795,6 +794,13 @@ export const UsageDashboard: React.FC<UsageDashboardProps> = ({ onBack }) => {
                       tickFormatter={(value) => `${value}K`}
                       className="text-muted-foreground"
                     />
+                    <YAxis
+                      yAxisId="right"
+                      orientation="right"
+                      tickFormatter={(value) => `$${value.toFixed(2)}`}
+                      className="text-muted-foreground"
+                    />
@@ ... @@ export const UsageDashboard: React.FC<UsageDashboardProps> = ({ onBack }) => {
                         boxShadow: '0 4px 12px rgba(0, 0, 0, 0.08), 0 2px 4px rgba(0, 0, 0, 0.04)',
                         backdropFilter: 'blur(8px)'
                       }}
-                      formatter={(value: number) => `${formatTokens(value * 1000)} tokens`}
+                      formatter={(value: number, name: string) => {
+                        if (name === 'totalTokens') {
+                          return `${formatTokens(value * 1000)} tokens`;
+                        } else if (name === 'cost') {
+                          return `$${value.toFixed(2)}`;
+                        }
+                        return value;
+                      }}
                     />
                     <Legend
                       formatter={(value) => {
                         const nameMap: Record<string, string> = {
-                          'input': t('usage.inputTokens'),
-                          'output': t('usage.outputTokens'),
-                          'cacheWrite': t('usage.cacheWrite'),
-                          'cacheRead': t('usage.cacheRead')
+                          'totalTokens': t('usage.totalTokens'),
+                          'cost': t('usage.cost')
                         };
                         return nameMap[value] || value;
                       }}
                     />
-                    <Bar dataKey="input" />
-                    <Bar dataKey="output" />
-                    <Bar dataKey="cacheWrite" />
-                    <Bar dataKey="cacheRead" />
+                    <Bar dataKey="totalTokens" />
+                    <Bar yAxisId="right" dataKey="cost" />
diff --git a/src/lib/api.ts b/src/lib/api.ts
index 115c2be..4030cfa 100644
--- a/src/lib/api.ts
+++ b/src/lib/api.ts
@@ -1198,14 +1198,22 @@ export const api = {
 
   /**
    * Gets overall usage statistics
+   * @param days - Optional number of days to look back
    * @returns Promise resolving to usage statistics
    */
-  async getUsageStats(): Promise<UsageStats> {
+  async getUsageStats(days?: number): Promise<UsageStats> {
     try {
-      return await invoke<UsageStats>("get_usage_stats");
+      // Use the cached API; it refreshes the cache automatically
+      return await invoke<UsageStats>("usage_get_stats_cached", { days });
     } catch (error) {
-      console.error("Failed to get usage stats:", error);
-      throw error;
+      console.error("Failed to get cached usage stats, falling back to direct scan:", error);
+      // If the cached version fails, fall back to the original implementation
+      try {
+        return await invoke<UsageStats>("get_usage_stats", { days });
+      } catch (fallbackError) {
+        console.error("Fallback to original API also failed:", fallbackError);
+        throw error;
+      }
     }
   },
 
@@ -1262,6 +1270,19 @@ export const api = {
     }
   },
 
+  /**
+   * Clears the usage cache and forces recalculation
+   * @returns Promise resolving to success message
+   */
+  async clearUsageCache(): Promise<string> {
+    try {
+      return await invoke<string>("usage_clear_cache");
+    } catch (error) {
+      console.error("Failed to clear usage cache:", error);
+      throw error;
+    }
+  },
+
   /**
    * Creates a checkpoint for the current session state
    */
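
End to end, the cached path is: usage_scan_update incrementally mirrors the JSONL logs into SQLite, usage_get_stats_cached serves aggregates from SQL, and the TypeScript layer falls back to the legacy full scan if the cached command errors. For debugging outside the app, the same database can be queried directly; a sketch, assuming the default db_path location:

    // Standalone sketch mirroring usage_get_stats_cached's 7-day cost query.
    fn last_week_cost() -> rusqlite::Result<f64> {
        let home = dirs::home_dir().unwrap_or_default();
        let conn = rusqlite::Connection::open(home.join(".claudia/cache/usage_stats.sqlite"))?;
        let cutoff = (chrono::Local::now().naive_local().date() - chrono::Duration::days(7))
            .format("%Y-%m-%d")
            .to_string();
        conn.query_row(
            "SELECT COALESCE(SUM(cost), 0.0) FROM usage_entries WHERE timestamp >= ?1",
            rusqlite::params![cutoff],
            |row| row.get(0),
        )
    }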