// claudia/src-tauri/src/commands/usage.rs

use chrono::{DateTime, Local, NaiveDate};
use serde::{Deserialize, Serialize};
use serde_json;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use tauri::command;

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct UsageEntry {
    pub timestamp: String,
    pub model: String,
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cache_creation_tokens: u64,
    pub cache_read_tokens: u64,
    pub cost: f64,
    pub session_id: String,
    pub project_path: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct UsageStats {
    pub total_cost: f64,
    pub total_tokens: u64,
    pub total_input_tokens: u64,
    pub total_output_tokens: u64,
    pub total_cache_creation_tokens: u64,
    pub total_cache_read_tokens: u64,
    pub total_sessions: u64,
    pub by_model: Vec<ModelUsage>,
    pub by_date: Vec<DailyUsage>,
    pub by_project: Vec<ProjectUsage>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ModelUsage {
    pub model: String,
    pub total_cost: f64,
    pub total_tokens: u64,
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cache_creation_tokens: u64,
    pub cache_read_tokens: u64,
    pub session_count: u64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct DailyUsage {
    pub date: String,
    pub total_cost: f64,
    pub total_tokens: u64,
    // New detailed per-day breakdowns
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cache_creation_tokens: u64,
    pub cache_read_tokens: u64,
    pub request_count: u64,
    pub models_used: Vec<String>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ProjectUsage {
    pub project_path: String,
    pub project_name: String,
    pub total_cost: f64,
    pub total_tokens: u64,
    pub session_count: u64,
    pub last_used: String,
}

// Claude pricing constants (per million tokens)
// Latest price table (2025-01)
// Claude 4.x family
const OPUS_4_1_INPUT_PRICE: f64 = 15.0; // Opus 4.1
const OPUS_4_1_OUTPUT_PRICE: f64 = 75.0;
const OPUS_4_1_CACHE_WRITE_PRICE: f64 = 18.75;
const OPUS_4_1_CACHE_READ_PRICE: f64 = 1.50; // updated to 1.50
const SONNET_4_INPUT_PRICE: f64 = 3.0; // Sonnet 4
const SONNET_4_OUTPUT_PRICE: f64 = 15.0;
const SONNET_4_CACHE_WRITE_PRICE: f64 = 3.75;
const SONNET_4_CACHE_READ_PRICE: f64 = 0.30;
// Claude 3.x family (older versions; prices may differ)
// Sonnet 3.7/3.5 - assumed identical to Sonnet 4
const SONNET_3_INPUT_PRICE: f64 = 3.0;
const SONNET_3_OUTPUT_PRICE: f64 = 15.0;
const SONNET_3_CACHE_WRITE_PRICE: f64 = 3.75;
const SONNET_3_CACHE_READ_PRICE: f64 = 0.30;
// Opus 3 - assumed identical to Opus 4.1
const OPUS_3_INPUT_PRICE: f64 = 15.0;
const OPUS_3_OUTPUT_PRICE: f64 = 75.0;
const OPUS_3_CACHE_WRITE_PRICE: f64 = 18.75;
const OPUS_3_CACHE_READ_PRICE: f64 = 1.50;
// Haiku 3.5 - most cost-effective
const HAIKU_3_5_INPUT_PRICE: f64 = 0.80;
const HAIKU_3_5_OUTPUT_PRICE: f64 = 4.0;
const HAIKU_3_5_CACHE_WRITE_PRICE: f64 = 1.0;
const HAIKU_3_5_CACHE_READ_PRICE: f64 = 0.08;

#[derive(Debug, Deserialize)]
struct JsonlEntry {
    timestamp: String,
    message: Option<MessageData>,
    #[serde(rename = "sessionId")]
    session_id: Option<String>,
    #[serde(rename = "requestId")]
    request_id: Option<String>,
    #[serde(rename = "costUSD")]
    #[allow(dead_code)]
    cost_usd: Option<f64>,
}

#[derive(Debug, Deserialize)]
struct MessageData {
    id: Option<String>,
    model: Option<String>,
    usage: Option<UsageData>,
}

#[derive(Debug, Deserialize)]
struct UsageData {
    input_tokens: Option<u64>,
    output_tokens: Option<u64>,
    cache_creation_input_tokens: Option<u64>,
    cache_read_input_tokens: Option<u64>,
}
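
// Note: these field names mirror the `usage` object in Anthropic Messages API
// responses (input_tokens, output_tokens, cache_creation_input_tokens,
// cache_read_input_tokens), which is what Claude Code writes into its JSONL logs.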

fn calculate_cost(model: &str, usage: &UsageData) -> f64 {
    let input_tokens = usage.input_tokens.unwrap_or(0) as f64;
    let output_tokens = usage.output_tokens.unwrap_or(0) as f64;
    let cache_creation_tokens = usage.cache_creation_input_tokens.unwrap_or(0) as f64;
    let cache_read_tokens = usage.cache_read_input_tokens.unwrap_or(0) as f64;
    // Smart model matching; supports several model-name formats.
    let model_lower = model.to_lowercase();
    let (input_price, output_price, cache_write_price, cache_read_price) =
        match_model_prices(&model_lower);
    // Compute the cost (prices are per million tokens).
    (input_tokens * input_price / 1_000_000.0)
        + (output_tokens * output_price / 1_000_000.0)
        + (cache_creation_tokens * cache_write_price / 1_000_000.0)
        + (cache_read_tokens * cache_read_price / 1_000_000.0)
}
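
// Worked example (hypothetical request, using the constants above):
// 2_000 input + 1_000 output tokens on Sonnet 4 pricing costs
//   2_000 * 3.0 / 1e6 + 1_000 * 15.0 / 1e6 = 0.006 + 0.015 = $0.021.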

// Standalone model-to-price matching, with more precise model identification.
fn match_model_prices(model_lower: &str) -> (f64, f64, f64, f64) {
    // Claude Opus 4.1 (newest and most capable)
    if model_lower.contains("opus") && (model_lower.contains("4-1") || model_lower.contains("4.1")) {
        (OPUS_4_1_INPUT_PRICE, OPUS_4_1_OUTPUT_PRICE, OPUS_4_1_CACHE_WRITE_PRICE, OPUS_4_1_CACHE_READ_PRICE)
    }
    // Claude Sonnet 4
    else if model_lower.contains("sonnet") && (model_lower.contains("-4-") || model_lower.contains("sonnet-4")) {
        (SONNET_4_INPUT_PRICE, SONNET_4_OUTPUT_PRICE, SONNET_4_CACHE_WRITE_PRICE, SONNET_4_CACHE_READ_PRICE)
    }
    // Claude Haiku 3.5
    else if model_lower.contains("haiku") {
        (HAIKU_3_5_INPUT_PRICE, HAIKU_3_5_OUTPUT_PRICE, HAIKU_3_5_CACHE_WRITE_PRICE, HAIKU_3_5_CACHE_READ_PRICE)
    }
    // Claude 3.x Sonnet family (3.7, 3.5)
    else if model_lower.contains("sonnet")
        && (model_lower.contains("3-7") || model_lower.contains("3.7")
            || model_lower.contains("3-5") || model_lower.contains("3.5"))
    {
        (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
    }
    // Claude 3 Opus (legacy)
    else if model_lower.contains("opus") && model_lower.contains("3") {
        (OPUS_3_INPUT_PRICE, OPUS_3_OUTPUT_PRICE, OPUS_3_CACHE_WRITE_PRICE, OPUS_3_CACHE_READ_PRICE)
    }
    // Default Sonnet (no explicit version in the name)
    else if model_lower.contains("sonnet") {
        (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
    }
    // Default Opus (no explicit version; assume the latest)
    else if model_lower.contains("opus") {
        (OPUS_4_1_INPUT_PRICE, OPUS_4_1_OUTPUT_PRICE, OPUS_4_1_CACHE_WRITE_PRICE, OPUS_4_1_CACHE_READ_PRICE)
    }
    // Unknown model
    else {
        log::warn!("Unknown model for cost calculation: {}", model_lower);
        // Fall back to Sonnet 3 pricing (a conservative estimate).
        (SONNET_3_INPUT_PRICE, SONNET_3_OUTPUT_PRICE, SONNET_3_CACHE_WRITE_PRICE, SONNET_3_CACHE_READ_PRICE)
    }
}
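
// Illustrative JSONL record shape, inferred from the structs above (the payload
// written by Claude Code may carry additional fields, which serde ignores):
//   {"timestamp":"2025-01-10T07:10:47Z","sessionId":"abc123","requestId":"req_1",
//    "cwd":"/home/user/project",
//    "message":{"id":"msg_1","model":"claude-sonnet-4-20250514",
//               "usage":{"input_tokens":2000,"output_tokens":1000,
//                        "cache_creation_input_tokens":0,"cache_read_input_tokens":0}}}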
pub fn parse_jsonl_file(
    path: &PathBuf,
    encoded_project_name: &str,
    processed_hashes: &mut HashSet<String>,
) -> Vec<UsageEntry> {
    let mut entries = Vec::new();
    let mut actual_project_path: Option<String> = None;
    if let Ok(content) = fs::read_to_string(path) {
        // Extract session ID from the file path
        let session_id = path
            .parent()
            .and_then(|p| p.file_name())
            .and_then(|n| n.to_str())
            .unwrap_or("unknown")
            .to_string();
        for line in content.lines() {
            if line.trim().is_empty() {
                continue;
            }
            if let Ok(json_value) = serde_json::from_str::<serde_json::Value>(line) {
                // Extract the actual project path from cwd if we haven't already
                if actual_project_path.is_none() {
                    if let Some(cwd) = json_value.get("cwd").and_then(|v| v.as_str()) {
                        actual_project_path = Some(cwd.to_string());
                    }
                }
                // Try to parse as JsonlEntry for usage data
                if let Ok(entry) = serde_json::from_value::<JsonlEntry>(json_value) {
                    if let Some(message) = &entry.message {
                        if let Some(usage) = &message.usage {
                            // Skip records where every token count is 0 (per the documented convention).
                            let has_tokens = usage.input_tokens.unwrap_or(0) > 0
                                || usage.output_tokens.unwrap_or(0) > 0
                                || usage.cache_creation_input_tokens.unwrap_or(0) > 0
                                || usage.cache_read_input_tokens.unwrap_or(0) > 0;
                            if !has_tokens {
                                continue;
                            }
                            // Smart deduplication strategy
                            let has_io_tokens = usage.input_tokens.unwrap_or(0) > 0
                                || usage.output_tokens.unwrap_or(0) > 0;
                            let has_cache_tokens = usage.cache_creation_input_tokens.unwrap_or(0) > 0
                                || usage.cache_read_input_tokens.unwrap_or(0) > 0;
                            let should_skip = if has_io_tokens {
                                // Input/output tokens: strict dedup keyed on session_id + message_id.
                                if let Some(msg_id) = &message.id {
                                    let unique_hash = format!("io:{}:{}", &session_id, msg_id);
                                    if processed_hashes.contains(&unique_hash) {
                                        true
                                    } else {
                                        processed_hashes.insert(unique_hash);
                                        false
                                    }
                                } else {
                                    false
                                }
                            } else if has_cache_tokens {
                                // Cache tokens: looser dedup keyed on message_id + request_id.
                                if let (Some(msg_id), Some(req_id)) = (&message.id, &entry.request_id) {
                                    let unique_hash = format!("cache:{}:{}", msg_id, req_id);
                                    if processed_hashes.contains(&unique_hash) {
                                        true
                                    } else {
                                        processed_hashes.insert(unique_hash);
                                        false
                                    }
                                } else {
                                    false
                                }
                            } else {
                                false
                            };
                            if should_skip {
                                continue;
                            }
                            // Always recompute the cost instead of trusting the costUSD field in the
                            // JSONL, since prices may have changed or the stored value may be wrong.
                            let cost = if let Some(model_str) = &message.model {
                                calculate_cost(model_str, usage)
                            } else {
                                0.0
                            };
                            // Use actual project path if found, otherwise use encoded name
                            let project_path = actual_project_path
                                .clone()
                                .unwrap_or_else(|| encoded_project_name.to_string());
                            entries.push(UsageEntry {
                                timestamp: entry.timestamp,
                                model: message
                                    .model
                                    .clone()
                                    .unwrap_or_else(|| "unknown".to_string()),
                                input_tokens: usage.input_tokens.unwrap_or(0),
                                output_tokens: usage.output_tokens.unwrap_or(0),
                                cache_creation_tokens: usage
                                    .cache_creation_input_tokens
                                    .unwrap_or(0),
                                cache_read_tokens: usage.cache_read_input_tokens.unwrap_or(0),
                                cost,
                                session_id: entry.session_id.unwrap_or_else(|| session_id.clone()),
                                project_path,
                            });
                        }
                    }
                }
            }
        }
    }
    entries
}

fn get_earliest_timestamp(path: &PathBuf) -> Option<String> {
    if let Ok(content) = fs::read_to_string(path) {
        let mut earliest_timestamp: Option<String> = None;
        for line in content.lines() {
            if let Ok(json_value) = serde_json::from_str::<serde_json::Value>(line) {
                if let Some(timestamp_str) = json_value.get("timestamp").and_then(|v| v.as_str()) {
                    if let Some(current_earliest) = &earliest_timestamp {
                        if timestamp_str < current_earliest.as_str() {
                            earliest_timestamp = Some(timestamp_str.to_string());
                        }
                    } else {
                        earliest_timestamp = Some(timestamp_str.to_string());
                    }
                }
            }
        }
        return earliest_timestamp;
    }
    None
}

pub fn get_all_usage_entries(claude_path: &PathBuf) -> Vec<UsageEntry> {
    let mut all_entries = Vec::new();
    let mut processed_hashes = HashSet::new();
    let projects_dir = claude_path.join("projects");
    let mut files_to_process: Vec<(PathBuf, String)> = Vec::new();
    if let Ok(projects) = fs::read_dir(&projects_dir) {
        for project in projects.flatten() {
            if project.file_type().map(|t| t.is_dir()).unwrap_or(false) {
                let project_name = project.file_name().to_string_lossy().to_string();
                let project_path = project.path();
                walkdir::WalkDir::new(&project_path)
                    .into_iter()
                    .filter_map(Result::ok)
                    .filter(|e| e.path().extension().and_then(|s| s.to_str()) == Some("jsonl"))
                    .for_each(|entry| {
                        files_to_process.push((entry.path().to_path_buf(), project_name.clone()));
                    });
            }
        }
    }
    // Sort files by their earliest timestamp to ensure chronological processing
    // and deterministic deduplication. (Files with no timestamp sort first,
    // since `None` orders before `Some`.)
    files_to_process.sort_by_cached_key(|(path, _)| get_earliest_timestamp(path));
    for (path, project_name) in files_to_process {
        let entries = parse_jsonl_file(&path, &project_name, &mut processed_hashes);
        all_entries.extend(entries);
    }
    // Sort by timestamp
    all_entries.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
    all_entries
}

#[command]
pub fn get_usage_stats(days: Option<u32>) -> Result<UsageStats, String> {
    let claude_path = dirs::home_dir()
        .ok_or("Failed to get home directory")?
        .join(".claude");
    let all_entries = get_all_usage_entries(&claude_path);
    if all_entries.is_empty() {
        return Ok(UsageStats {
            total_cost: 0.0,
            total_tokens: 0,
            total_input_tokens: 0,
            total_output_tokens: 0,
            total_cache_creation_tokens: 0,
            total_cache_read_tokens: 0,
            total_sessions: 0,
            by_model: vec![],
            by_date: vec![],
            by_project: vec![],
        });
    }
    // Filter by days if specified
    let filtered_entries = if let Some(days) = days {
        // Use the local date of 'now' for consistent comparison
        let cutoff = Local::now().date_naive() - chrono::Duration::days(days as i64);
        all_entries
            .into_iter()
            .filter(|e| {
                if let Ok(dt) = DateTime::parse_from_rfc3339(&e.timestamp) {
                    // Convert each entry timestamp to local time, then compare dates
                    let local_date = dt.with_timezone(&Local).date_naive();
                    local_date >= cutoff
                } else {
                    false
                }
            })
            .collect()
    } else {
        all_entries
    };
    // Calculate aggregated stats
    let mut total_cost = 0.0;
    let mut total_input_tokens = 0u64;
    let mut total_output_tokens = 0u64;
    let mut total_cache_creation_tokens = 0u64;
    let mut total_cache_read_tokens = 0u64;
    // Use HashSets to guarantee session uniqueness
    let mut unique_sessions: HashSet<String> = HashSet::new();
    let mut model_sessions: HashMap<String, HashSet<String>> = HashMap::new();
    let mut project_sessions: HashMap<String, HashSet<String>> = HashMap::new();
    let mut model_stats: HashMap<String, ModelUsage> = HashMap::new();
    let mut daily_stats: HashMap<String, DailyUsage> = HashMap::new();
    let mut project_stats: HashMap<String, ProjectUsage> = HashMap::new();
    for entry in &filtered_entries {
        // Update totals
        total_cost += entry.cost;
        total_input_tokens += entry.input_tokens;
        total_output_tokens += entry.output_tokens;
        total_cache_creation_tokens += entry.cache_creation_tokens;
        total_cache_read_tokens += entry.cache_read_tokens;
        // Collect unique sessions
        unique_sessions.insert(entry.session_id.clone());
        // Update model stats with unique sessions tracking
        let model_stat = model_stats
            .entry(entry.model.clone())
            .or_insert(ModelUsage {
                model: entry.model.clone(),
                total_cost: 0.0,
                total_tokens: 0,
                input_tokens: 0,
                output_tokens: 0,
                cache_creation_tokens: 0,
                cache_read_tokens: 0,
                session_count: 0,
            });
        model_stat.total_cost += entry.cost;
        model_stat.input_tokens += entry.input_tokens;
        model_stat.output_tokens += entry.output_tokens;
        model_stat.cache_creation_tokens += entry.cache_creation_tokens;
        model_stat.cache_read_tokens += entry.cache_read_tokens;
        // Per-model totals count only input + output tokens; cache tokens stay in their own fields.
        model_stat.total_tokens = model_stat.input_tokens + model_stat.output_tokens;
        // Track unique sessions per model
        model_sessions
            .entry(entry.model.clone())
            .or_insert_with(HashSet::new)
            .insert(entry.session_id.clone());
        // Update daily stats (use local timezone date)
        let date = if let Ok(dt) = DateTime::parse_from_rfc3339(&entry.timestamp) {
            dt.with_timezone(&Local).date_naive().to_string()
        } else {
            // Fallback to raw prefix if parse fails
            entry
                .timestamp
                .split('T')
                .next()
                .unwrap_or(&entry.timestamp)
                .to_string()
        };
        let daily_stat = daily_stats.entry(date.clone()).or_insert(DailyUsage {
            date,
            total_cost: 0.0,
            total_tokens: 0,
            input_tokens: 0,
            output_tokens: 0,
            cache_creation_tokens: 0,
            cache_read_tokens: 0,
            request_count: 0,
            models_used: vec![],
        });
        daily_stat.total_cost += entry.cost;
        daily_stat.input_tokens += entry.input_tokens;
        daily_stat.output_tokens += entry.output_tokens;
        daily_stat.cache_creation_tokens += entry.cache_creation_tokens;
        daily_stat.cache_read_tokens += entry.cache_read_tokens;
        daily_stat.total_tokens = daily_stat.input_tokens
            + daily_stat.output_tokens
            + daily_stat.cache_creation_tokens
            + daily_stat.cache_read_tokens;
        daily_stat.request_count += 1;
        if !daily_stat.models_used.contains(&entry.model) {
            daily_stat.models_used.push(entry.model.clone());
        }
        // Update project stats with unique sessions tracking
        let project_stat =
            project_stats
                .entry(entry.project_path.clone())
                .or_insert(ProjectUsage {
                    project_path: entry.project_path.clone(),
                    project_name: entry
                        .project_path
                        .split('/')
                        .last()
                        .unwrap_or(&entry.project_path)
                        .to_string(),
                    total_cost: 0.0,
                    total_tokens: 0,
                    session_count: 0,
                    last_used: entry.timestamp.clone(),
                });
        project_stat.total_cost += entry.cost;
        project_stat.total_tokens += entry.input_tokens
            + entry.output_tokens
            + entry.cache_creation_tokens
            + entry.cache_read_tokens;
        // Track unique sessions per project
        project_sessions
            .entry(entry.project_path.clone())
            .or_insert_with(HashSet::new)
            .insert(entry.session_id.clone());
        if entry.timestamp > project_stat.last_used {
            project_stat.last_used = entry.timestamp.clone();
        }
    }
    // Replace session counts with the number of unique sessions
    for (model, sessions) in model_sessions {
        if let Some(stat) = model_stats.get_mut(&model) {
            stat.session_count = sessions.len() as u64;
        }
    }
    for (project, sessions) in project_sessions {
        if let Some(stat) = project_stats.get_mut(&project) {
            stat.session_count = sessions.len() as u64;
        }
    }
    let total_tokens = total_input_tokens
        + total_output_tokens
        + total_cache_creation_tokens
        + total_cache_read_tokens;
    let total_sessions = unique_sessions.len() as u64;
    // Convert hashmaps to sorted vectors; total_cmp avoids the NaN panic of partial_cmp().unwrap()
    let mut by_model: Vec<ModelUsage> = model_stats.into_values().collect();
    by_model.sort_by(|a, b| b.total_cost.total_cmp(&a.total_cost));
    let mut by_date: Vec<DailyUsage> = daily_stats.into_values().collect();
    by_date.sort_by(|a, b| b.date.cmp(&a.date));
    let mut by_project: Vec<ProjectUsage> = project_stats.into_values().collect();
    by_project.sort_by(|a, b| b.total_cost.total_cmp(&a.total_cost));
    Ok(UsageStats {
        total_cost,
        total_tokens,
        total_input_tokens,
        total_output_tokens,
        total_cache_creation_tokens,
        total_cache_read_tokens,
        total_sessions,
        by_model,
        by_date,
        by_project,
    })
}
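
// These #[command] functions are expected to be registered with the Tauri builder
// (e.g. via tauri::generate_handler![get_usage_stats, ...] in the app's setup code;
// the exact registration site is not shown in this file). Tauri's invoke layer maps
// camelCase arguments from the frontend onto these snake_case parameters.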

#[command]
pub fn get_usage_by_date_range(start_date: String, end_date: String) -> Result<UsageStats, String> {
    let claude_path = dirs::home_dir()
        .ok_or("Failed to get home directory")?
        .join(".claude");
    let all_entries = get_all_usage_entries(&claude_path);
    // Parse dates
    let start = NaiveDate::parse_from_str(&start_date, "%Y-%m-%d").or_else(|_| {
        // Try parsing ISO datetime format (convert to local date)
        DateTime::parse_from_rfc3339(&start_date)
            .map(|dt| dt.with_timezone(&Local).date_naive())
            .map_err(|e| format!("Invalid start date: {}", e))
    })?;
    let end = NaiveDate::parse_from_str(&end_date, "%Y-%m-%d").or_else(|_| {
        // Try parsing ISO datetime format (convert to local date)
        DateTime::parse_from_rfc3339(&end_date)
            .map(|dt| dt.with_timezone(&Local).date_naive())
            .map_err(|e| format!("Invalid end date: {}", e))
    })?;
    // Filter entries by date range
    let filtered_entries: Vec<_> = all_entries
        .into_iter()
        .filter(|e| {
            if let Ok(dt) = DateTime::parse_from_rfc3339(&e.timestamp) {
                let date = dt.with_timezone(&Local).date_naive();
                date >= start && date <= end
            } else {
                false
            }
        })
        .collect();
    if filtered_entries.is_empty() {
        return Ok(UsageStats {
            total_cost: 0.0,
            total_tokens: 0,
            total_input_tokens: 0,
            total_output_tokens: 0,
            total_cache_creation_tokens: 0,
            total_cache_read_tokens: 0,
            total_sessions: 0,
            by_model: vec![],
            by_date: vec![],
            by_project: vec![],
        });
    }
    // Calculate aggregated stats (same logic as get_usage_stats)
    let mut total_cost = 0.0;
    let mut total_input_tokens = 0u64;
    let mut total_output_tokens = 0u64;
    let mut total_cache_creation_tokens = 0u64;
    let mut total_cache_read_tokens = 0u64;
    // Use HashSets to guarantee session uniqueness
    let mut unique_sessions: HashSet<String> = HashSet::new();
    let mut model_sessions: HashMap<String, HashSet<String>> = HashMap::new();
    let mut project_sessions: HashMap<String, HashSet<String>> = HashMap::new();
    let mut model_stats: HashMap<String, ModelUsage> = HashMap::new();
    let mut daily_stats: HashMap<String, DailyUsage> = HashMap::new();
    let mut project_stats: HashMap<String, ProjectUsage> = HashMap::new();
    for entry in &filtered_entries {
        // Update totals
        total_cost += entry.cost;
        total_input_tokens += entry.input_tokens;
        total_output_tokens += entry.output_tokens;
        total_cache_creation_tokens += entry.cache_creation_tokens;
        total_cache_read_tokens += entry.cache_read_tokens;
        // Collect unique sessions
        unique_sessions.insert(entry.session_id.clone());
        // Update model stats
        let model_stat = model_stats
            .entry(entry.model.clone())
            .or_insert(ModelUsage {
                model: entry.model.clone(),
                total_cost: 0.0,
                total_tokens: 0,
                input_tokens: 0,
                output_tokens: 0,
                cache_creation_tokens: 0,
                cache_read_tokens: 0,
                session_count: 0,
            });
        model_stat.total_cost += entry.cost;
        model_stat.input_tokens += entry.input_tokens;
        model_stat.output_tokens += entry.output_tokens;
        model_stat.cache_creation_tokens += entry.cache_creation_tokens;
        model_stat.cache_read_tokens += entry.cache_read_tokens;
        model_stat.total_tokens = model_stat.input_tokens + model_stat.output_tokens;
        // Track unique sessions per model
        model_sessions
            .entry(entry.model.clone())
            .or_insert_with(HashSet::new)
            .insert(entry.session_id.clone());
        // Update daily stats (use local timezone date)
        let date = if let Ok(dt) = DateTime::parse_from_rfc3339(&entry.timestamp) {
            dt.with_timezone(&Local).date_naive().to_string()
        } else {
            entry
                .timestamp
                .split('T')
                .next()
                .unwrap_or(&entry.timestamp)
                .to_string()
        };
        let daily_stat = daily_stats.entry(date.clone()).or_insert(DailyUsage {
            date,
            total_cost: 0.0,
            total_tokens: 0,
            input_tokens: 0,
            output_tokens: 0,
            cache_creation_tokens: 0,
            cache_read_tokens: 0,
            request_count: 0,
            models_used: vec![],
        });
        daily_stat.total_cost += entry.cost;
        daily_stat.input_tokens += entry.input_tokens;
        daily_stat.output_tokens += entry.output_tokens;
        daily_stat.cache_creation_tokens += entry.cache_creation_tokens;
        daily_stat.cache_read_tokens += entry.cache_read_tokens;
        daily_stat.total_tokens = daily_stat.input_tokens
            + daily_stat.output_tokens
            + daily_stat.cache_creation_tokens
            + daily_stat.cache_read_tokens;
        daily_stat.request_count += 1;
        if !daily_stat.models_used.contains(&entry.model) {
            daily_stat.models_used.push(entry.model.clone());
        }
        // Update project stats with unique sessions tracking
        let project_stat =
            project_stats
                .entry(entry.project_path.clone())
                .or_insert(ProjectUsage {
                    project_path: entry.project_path.clone(),
                    project_name: entry
                        .project_path
                        .split('/')
                        .last()
                        .unwrap_or(&entry.project_path)
                        .to_string(),
                    total_cost: 0.0,
                    total_tokens: 0,
                    session_count: 0,
                    last_used: entry.timestamp.clone(),
                });
        project_stat.total_cost += entry.cost;
        project_stat.total_tokens += entry.input_tokens
            + entry.output_tokens
            + entry.cache_creation_tokens
            + entry.cache_read_tokens;
        // Track unique sessions per project
        project_sessions
            .entry(entry.project_path.clone())
            .or_insert_with(HashSet::new)
            .insert(entry.session_id.clone());
        if entry.timestamp > project_stat.last_used {
            project_stat.last_used = entry.timestamp.clone();
        }
    }
    // Replace session counts with the number of unique sessions
    for (model, sessions) in model_sessions {
        if let Some(stat) = model_stats.get_mut(&model) {
            stat.session_count = sessions.len() as u64;
        }
    }
    for (project, sessions) in project_sessions {
        if let Some(stat) = project_stats.get_mut(&project) {
            stat.session_count = sessions.len() as u64;
        }
    }
    let total_tokens = total_input_tokens
        + total_output_tokens
        + total_cache_creation_tokens
        + total_cache_read_tokens;
    let total_sessions = unique_sessions.len() as u64;
    // Convert hashmaps to sorted vectors; total_cmp avoids the NaN panic of partial_cmp().unwrap()
    let mut by_model: Vec<ModelUsage> = model_stats.into_values().collect();
    by_model.sort_by(|a, b| b.total_cost.total_cmp(&a.total_cost));
    let mut by_date: Vec<DailyUsage> = daily_stats.into_values().collect();
    by_date.sort_by(|a, b| b.date.cmp(&a.date));
    let mut by_project: Vec<ProjectUsage> = project_stats.into_values().collect();
    by_project.sort_by(|a, b| b.total_cost.total_cmp(&a.total_cost));
    Ok(UsageStats {
        total_cost,
        total_tokens,
        total_input_tokens,
        total_output_tokens,
        total_cache_creation_tokens,
        total_cache_read_tokens,
        total_sessions,
        by_model,
        by_date,
        by_project,
    })
}

#[command]
pub fn get_usage_details(
    project_path: Option<String>,
    date: Option<String>,
) -> Result<Vec<UsageEntry>, String> {
    let claude_path = dirs::home_dir()
        .ok_or("Failed to get home directory")?
        .join(".claude");
    let mut all_entries = get_all_usage_entries(&claude_path);
    // Filter by project if specified
    if let Some(project) = project_path {
        all_entries.retain(|e| e.project_path == project);
    }
    // Filter by date if specified (compare against local date string YYYY-MM-DD)
    if let Some(date) = date {
        all_entries.retain(|e| {
            if let Ok(dt) = DateTime::parse_from_rfc3339(&e.timestamp) {
                let local_date_str = dt.with_timezone(&Local).date_naive().to_string();
                local_date_str == date
            } else {
                false
            }
        });
    }
    Ok(all_entries)
}

#[command]
pub fn get_session_stats(
    since: Option<String>,
    until: Option<String>,
    order: Option<String>,
) -> Result<Vec<ProjectUsage>, String> {
    let claude_path = dirs::home_dir()
        .ok_or("Failed to get home directory")?
        .join(".claude");
    let all_entries = get_all_usage_entries(&claude_path);
    // since/until are expected as compact dates, e.g. "20250110"
    let since_date = since.and_then(|s| NaiveDate::parse_from_str(&s, "%Y%m%d").ok());
    let until_date = until.and_then(|s| NaiveDate::parse_from_str(&s, "%Y%m%d").ok());
    let filtered_entries: Vec<_> = all_entries
        .into_iter()
        .filter(|e| {
            if let Ok(dt) = DateTime::parse_from_rfc3339(&e.timestamp) {
                let date = dt.with_timezone(&Local).date_naive();
                let is_after_since = since_date.map_or(true, |s| date >= s);
                let is_before_until = until_date.map_or(true, |u| date <= u);
                is_after_since && is_before_until
            } else {
                false
            }
        })
        .collect();
    let mut session_stats: HashMap<String, ProjectUsage> = HashMap::new();
    for entry in &filtered_entries {
        let session_key = format!("{}/{}", entry.project_path, entry.session_id);
        let project_stat = session_stats
            .entry(session_key)
            .or_insert_with(|| ProjectUsage {
                project_path: entry.project_path.clone(),
                project_name: entry.session_id.clone(), // Using session_id as project_name for session view
                total_cost: 0.0,
                total_tokens: 0,
                session_count: 0, // In this context, this counts entries per session
                last_used: " ".to_string(), // placeholder that sorts before any real timestamp
            });
        project_stat.total_cost += entry.cost;
        project_stat.total_tokens += entry.input_tokens
            + entry.output_tokens
            + entry.cache_creation_tokens
            + entry.cache_read_tokens;
        project_stat.session_count += 1;
        if entry.timestamp > project_stat.last_used {
            project_stat.last_used = entry.timestamp.clone();
        }
    }
    let mut by_session: Vec<ProjectUsage> = session_stats.into_values().collect();
    // Sort by last_used date
    if let Some(order_str) = order {
        if order_str == "asc" {
            by_session.sort_by(|a, b| a.last_used.cmp(&b.last_used));
        } else {
            by_session.sort_by(|a, b| b.last_used.cmp(&a.last_used));
        }
    } else {
        // Default to descending
        by_session.sort_by(|a, b| b.last_used.cmp(&a.last_used));
    }
    Ok(by_session)
}
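
// A minimal sanity-check sketch for the pricing helpers above. The tests are
// illustrative (the model ID strings are plausible examples, not an exhaustive
// list) and exercise only code defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn opus_4_1_ids_match_opus_4_1_prices() {
        // Any lowercase model string containing "opus" and "4-1" takes the Opus 4.1 branch.
        let prices = match_model_prices("claude-opus-4-1-20250805");
        assert_eq!(prices.0, OPUS_4_1_INPUT_PRICE);
        assert_eq!(prices.1, OPUS_4_1_OUTPUT_PRICE);
    }

    #[test]
    fn unknown_models_fall_back_to_sonnet_3_prices() {
        // No "opus"/"sonnet"/"haiku" substring: hits the conservative fallback branch.
        let prices = match_model_prices("some-unknown-model");
        assert_eq!(prices.0, SONNET_3_INPUT_PRICE);
    }

    #[test]
    fn cost_is_charged_per_million_tokens() {
        let usage = UsageData {
            input_tokens: Some(2_000),
            output_tokens: Some(1_000),
            cache_creation_input_tokens: None,
            cache_read_input_tokens: None,
        };
        // 2_000 * $3/M + 1_000 * $15/M = $0.021 on Sonnet 4 pricing.
        let cost = calculate_cost("claude-sonnet-4-20250514", &usage);
        assert!((cost - 0.021).abs() < 1e-12);
    }
}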