fix tokens
@@ -6,8 +6,8 @@ pub enum TokenCommands {
    /// Show Claude Code token usage summary and estimated costs
    Summary {
        /// Time period (today, week, month, all)
        #[arg(long, default_value = "today")]
        period: String,
        #[arg(long, default_value = "week")]
        period: Option<String>,
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
@@ -16,13 +16,16 @@ pub enum TokenCommands {
        details: bool,
        /// Output format (table, json)
        #[arg(long, default_value = "table")]
        format: String,
        format: Option<String>,
        /// Cost calculation mode (auto, calculate, display)
        #[arg(long, default_value = "auto")]
        mode: Option<String>,
    },
    /// Show daily token usage breakdown
    Daily {
        /// Number of days to show
        #[arg(long, default_value = "7")]
        days: u32,
        days: Option<u32>,
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
@@ -33,4 +36,39 @@ pub enum TokenCommands {
        #[arg(long)]
        claude_dir: Option<PathBuf>,
    },
    /// Analyze specific JSONL file (advanced)
    Analyze {
        /// Path to JSONL file to analyze
        file: PathBuf,
    },
    /// Generate beautiful token usage report using DuckDB (like the viral Claude Code usage visualization)
    Report {
        /// Number of days to include in report
        #[arg(long, default_value = "7")]
        days: Option<u32>,
    },
    /// Show detailed cost breakdown by session (requires DuckDB)
    Cost {
        /// Month to analyze (YYYY-MM, 'today', 'current')
        #[arg(long)]
        month: Option<String>,
    },
    /// Show token usage breakdown by project
    Projects {
        /// Time period (today, week, month, all)
        #[arg(long, default_value = "week")]
        period: Option<String>,
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
        /// Cost calculation mode (auto, calculate, display)
        #[arg(long, default_value = "calculate")]
        mode: Option<String>,
        /// Show detailed breakdown
        #[arg(long)]
        details: bool,
        /// Number of top projects to show
        #[arg(long, default_value = "10")]
        top: Option<u32>,
    },
}
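Note: taken together, the clap definitions above describe the `tokens` subcommand surface. A few representative invocations, inferred from the attributes and matching the examples that print_duckdb_help emits later in this commit (binary name `aigpt` as used there; flag spellings assume clap's default kebab-case):

    aigpt tokens summary --period month --format json --mode calculate
    aigpt tokens projects --period week --top 5 --details
    aigpt tokens report --days 7
    aigpt tokens cost --month today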
@@ -6,16 +6,10 @@ use crate::persona::Persona;
use crate::transmission::TransmissionController;
use crate::scheduler::AIScheduler;

// Token commands enum (placeholder for tokens.rs)
#[derive(Debug, clap::Subcommand)]
pub enum TokenCommands {
    Analyze { file: PathBuf },
    Report { days: Option<u32> },
    Cost { month: Option<String> },
    Summary { period: Option<String>, claude_dir: Option<PathBuf>, details: bool, format: Option<String> },
    Daily { days: Option<u32>, claude_dir: Option<PathBuf> },
    Status { claude_dir: Option<PathBuf> },
}
// Re-export from commands module
pub use commands::TokenCommands;

mod commands;

pub async fn handle_server(port: Option<u16>, data_dir: Option<PathBuf>) -> Result<()> {
    let port = port.unwrap_or(8080);
@@ -260,6 +260,7 @@ impl OpenAIProvider {
                    name: None,
                }
            ),
            #[allow(deprecated)]
            ChatCompletionRequestMessage::Assistant(
                ChatCompletionRequestAssistantMessage {
                    content: message.content.clone(),
@@ -317,7 +318,8 @@ impl OpenAIProvider {

        match function_name.as_str() {
            "get_memories" => {
                let limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
                let _limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
                // TODO: Implement actual MCP call
                Ok(json!({"info": "記憶機能は実装中です"}))
            }
@@ -387,4 +389,4 @@ impl OpenAIProvider {
                }
            }
        }
    }
}
688 src/tokens.rs
@@ -7,18 +7,27 @@ use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};

use crate::cli::TokenCommands;
use std::process::Command;

/// Token usage record from Claude Code JSONL files
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TokenRecord {
    #[serde(default)]
    pub timestamp: String,
    pub timestamp: Option<String>,
    #[serde(default)]
    pub usage: Option<TokenUsage>,
    pub r#type: Option<String>,
    #[serde(default)]
    pub model: Option<String>,
    pub message: Option<serde_json::Value>,
    #[serde(default)]
    pub conversation_id: Option<String>,
    #[serde(rename = "sessionId")]
    pub session_id: Option<String>,
    #[serde(default)]
    #[serde(rename = "costUSD")]
    pub cost_usd: Option<f64>,
    #[serde(default)]
    pub uuid: Option<String>,
    #[serde(default)]
    pub cwd: Option<String>,
}

/// Token usage details
@@ -32,14 +41,38 @@ pub struct TokenUsage {
    pub total_tokens: Option<u64>,
}

/// Cost calculation modes
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum CostMode {
    /// Use costUSD if available, otherwise calculate from tokens
    Auto,
    /// Always calculate costs from token counts, ignore costUSD
    Calculate,
    /// Always use pre-calculated costUSD values, show 0 for missing costs
    Display,
}

impl From<&str> for CostMode {
    fn from(mode: &str) -> Self {
        match mode {
            "calculate" => CostMode::Calculate,
            "display" => CostMode::Display,
            _ => CostMode::Auto, // default
        }
    }
}

/// Cost calculation summary
#[derive(Debug, Clone, Serialize)]
pub struct CostSummary {
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cache_creation_tokens: u64,
    pub cache_read_tokens: u64,
    pub total_tokens: u64,
    pub input_cost_usd: f64,
    pub output_cost_usd: f64,
    pub cache_cost_usd: f64,
    pub total_cost_usd: f64,
    pub total_cost_jpy: f64,
    pub record_count: usize,
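Note: the From<&str> impl above is what lets the CLI's --mode flag stay a plain string. A minimal sketch of its behavior (any unrecognized value falls back to Auto):

    let mode = CostMode::from("calculate");
    assert_eq!(mode, CostMode::Calculate);
    assert_eq!(CostMode::from("display"), CostMode::Display);
    assert_eq!(CostMode::from("anything else"), CostMode::Auto); // catch-all arm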
@@ -52,6 +85,14 @@ pub struct DailyBreakdown {
    pub summary: CostSummary,
}

/// Project breakdown of token usage
#[derive(Debug, Clone, Serialize)]
pub struct ProjectBreakdown {
    pub project_path: String,
    pub project_name: String,
    pub summary: CostSummary,
}

/// Configuration for cost calculation
#[derive(Debug, Clone)]
pub struct CostConfig {
@@ -105,16 +146,26 @@ impl TokenAnalyzer {
        None
    }

    /// Parse JSONL files from Claude data directory
    /// Parse JSONL files from Claude data directory (recursive search)
    pub fn parse_jsonl_files<P: AsRef<Path>>(&self, claude_dir: P) -> Result<Vec<TokenRecord>> {
        let claude_dir = claude_dir.as_ref();
        let mut records = Vec::new();

        // Look for JSONL files in the directory
        if let Ok(entries) = std::fs::read_dir(claude_dir) {
        // Recursively look for JSONL files in the directory and subdirectories
        self.parse_jsonl_files_recursive(claude_dir, &mut records)?;

        Ok(records)
    }

    /// Recursively parse JSONL files
    fn parse_jsonl_files_recursive(&self, dir: &Path, records: &mut Vec<TokenRecord>) -> Result<()> {
        if let Ok(entries) = std::fs::read_dir(dir) {
            for entry in entries.flatten() {
                let path = entry.path();
                if path.extension().map_or(false, |ext| ext == "jsonl") {
                if path.is_dir() {
                    // Recursively search subdirectories
                    self.parse_jsonl_files_recursive(&path, records)?;
                } else if path.extension().map_or(false, |ext| ext == "jsonl") {
                    match self.parse_jsonl_file(&path) {
                        Ok(mut file_records) => records.append(&mut file_records),
                        Err(e) => {
@@ -124,8 +175,7 @@ impl TokenAnalyzer {
                }
            }
        }

        Ok(records)
        Ok(())
    }

    /// Parse a single JSONL file
@@ -143,8 +193,10 @@ impl TokenAnalyzer {

        match serde_json::from_str::<TokenRecord>(&line_content) {
            Ok(record) => {
                // Only include records with usage data
                if record.usage.is_some() {
                // Only include records with usage data in message or costUSD
                let has_usage_data = record.cost_usd.is_some() ||
                    record.message.as_ref().and_then(|m| m.get("usage")).is_some();
                if has_usage_data {
                    records.push(record);
                }
            }
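Note: to make the new filter concrete, here is a hand-written record of the shape this parser keeps. Every value is hypothetical, but the field names follow the serde attributes on TokenRecord above, and the has_usage_data check mirrors the one in the diff:

    // Hypothetical JSONL line; values are invented for illustration only.
    let line = r#"{"timestamp":"2025-06-10T03:15:00Z","type":"assistant","sessionId":"abc12345","costUSD":0.0123,"cwd":"/home/user/myproject","message":{"usage":{"input_tokens":1200,"output_tokens":350,"cache_read_input_tokens":800}}}"#;
    let record: TokenRecord = serde_json::from_str(line).expect("record parses");
    let has_usage_data = record.cost_usd.is_some()
        || record.message.as_ref().and_then(|m| m.get("usage")).is_some();
    assert!(has_usage_data); // records with neither costUSD nor message.usage are skipped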
@@ -164,28 +216,143 @@ impl TokenAnalyzer {

    /// Calculate cost summary from records
    pub fn calculate_costs(&self, records: &[TokenRecord]) -> CostSummary {
        self.calculate_costs_with_mode(records, CostMode::Auto)
    }

    /// Calculate cost summary from records with specified cost mode
    pub fn calculate_costs_with_mode(&self, records: &[TokenRecord], mode: CostMode) -> CostSummary {
        let mut input_tokens = 0u64;
        let mut output_tokens = 0u64;
        let mut cache_creation_tokens = 0u64;
        let mut cache_read_tokens = 0u64;
        let mut total_cost_usd = 0.0;
        let mut cost_records_count = 0;

        for record in records {
            if let Some(usage) = &record.usage {
                input_tokens += usage.input_tokens.unwrap_or(0);
                output_tokens += usage.output_tokens.unwrap_or(0);
            // Extract token usage from message.usage field
            if let Some(message) = &record.message {
                if let Some(usage) = message.get("usage") {
                    if let Some(input) = usage.get("input_tokens").and_then(|v| v.as_u64()) {
                        input_tokens += input;
                    }
                    if let Some(output) = usage.get("output_tokens").and_then(|v| v.as_u64()) {
                        output_tokens += output;
                    }
                    // Track cache tokens separately
                    if let Some(cache_creation) = usage.get("cache_creation_input_tokens").and_then(|v| v.as_u64()) {
                        cache_creation_tokens += cache_creation;
                    }
                    if let Some(cache_read) = usage.get("cache_read_input_tokens").and_then(|v| v.as_u64()) {
                        cache_read_tokens += cache_read;
                    }
                }
            }

            // Calculate cost based on mode
            let record_cost = match mode {
                CostMode::Display => {
                    // Always use costUSD, even if undefined (0.0)
                    record.cost_usd.unwrap_or(0.0)
                }
                CostMode::Calculate => {
                    // Always calculate from tokens
                    if let Some(message) = &record.message {
                        if let Some(usage) = message.get("usage") {
                            let input = usage.get("input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let output = usage.get("output_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let cache_creation = usage.get("cache_creation_input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let cache_read = usage.get("cache_read_input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;

                            // Regular tokens at normal price
                            let regular_cost = (input / 1_000_000.0) * self.config.input_cost_per_1m +
                                (output / 1_000_000.0) * self.config.output_cost_per_1m;

                            // Cache tokens - cache creation at normal price, cache read at reduced price
                            let cache_cost = (cache_creation / 1_000_000.0) * self.config.input_cost_per_1m +
                                (cache_read / 1_000_000.0) * (self.config.input_cost_per_1m * 0.1); // 10% of normal price for cache reads

                            regular_cost + cache_cost
                        } else {
                            0.0
                        }
                    } else {
                        0.0
                    }
                }
                CostMode::Auto => {
                    // Use costUSD if available, otherwise calculate from tokens
                    if let Some(cost) = record.cost_usd {
                        cost
                    } else if let Some(message) = &record.message {
                        if let Some(usage) = message.get("usage") {
                            let input = usage.get("input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let output = usage.get("output_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let cache_creation = usage.get("cache_creation_input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;
                            let cache_read = usage.get("cache_read_input_tokens").and_then(|v| v.as_u64()).unwrap_or(0) as f64;

                            // Regular tokens at normal price
                            let regular_cost = (input / 1_000_000.0) * self.config.input_cost_per_1m +
                                (output / 1_000_000.0) * self.config.output_cost_per_1m;

                            // Cache tokens - cache creation at normal price, cache read at reduced price
                            let cache_cost = (cache_creation / 1_000_000.0) * self.config.input_cost_per_1m +
                                (cache_read / 1_000_000.0) * (self.config.input_cost_per_1m * 0.1); // 10% of normal price for cache reads

                            regular_cost + cache_cost
                        } else {
                            0.0
                        }
                    } else {
                        0.0
                    }
                }
            };

            total_cost_usd += record_cost;
            if record.cost_usd.is_some() {
                cost_records_count += 1;
            }
        }

        // Debug info
        match mode {
            CostMode::Display => {
                if cost_records_count > 0 {
                    println!("Debug: Display mode - Found {} records with costUSD data, total: ${:.4}", cost_records_count, total_cost_usd);
                } else {
                    println!("Debug: Display mode - No costUSD data found, showing $0.00");
                }
            }
            CostMode::Calculate => {
                println!("Debug: Calculate mode - Using token-based calculation only, total: ${:.4}", total_cost_usd);
            }
            CostMode::Auto => {
                if cost_records_count > 0 {
                    println!("Debug: Auto mode - Found {} records with costUSD data, total: ${:.4}", cost_records_count, total_cost_usd);
                } else {
                    println!("Debug: Auto mode - No costUSD data found, using token-based calculation");
                }
            }
        }

        let total_tokens = input_tokens + output_tokens;
        let total_tokens = input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens;

        // Calculate component costs for display purposes
        let input_cost_usd = (input_tokens as f64 / 1_000_000.0) * self.config.input_cost_per_1m;
        let output_cost_usd = (output_tokens as f64 / 1_000_000.0) * self.config.output_cost_per_1m;
        let total_cost_usd = input_cost_usd + output_cost_usd;
        let cache_cost_usd = (cache_creation_tokens as f64 / 1_000_000.0) * self.config.input_cost_per_1m +
            (cache_read_tokens as f64 / 1_000_000.0) * (self.config.input_cost_per_1m * 0.1);
        let total_cost_jpy = total_cost_usd * self.config.usd_to_jpy_rate;

        CostSummary {
            input_tokens,
            output_tokens,
            cache_creation_tokens,
            cache_read_tokens,
            total_tokens,
            input_cost_usd,
            output_cost_usd,
            cache_cost_usd,
            total_cost_usd,
            total_cost_jpy,
            record_count: records.len(),
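Note: as a sanity check on the formula above, a worked example with invented token counts, using the $3.00/1M input, $15.00/1M output, and 10% cache-read rates plus the fixed 150 JPY/USD conversion that this file prints elsewhere:

    // Hypothetical counts: 2,000,000 input, 300,000 output, 1,000,000 cache-read tokens.
    let input_cost  = (2_000_000.0 / 1_000_000.0) * 3.00;          // $6.00
    let output_cost = (  300_000.0 / 1_000_000.0) * 15.00;         // $4.50
    let cache_cost  = (1_000_000.0 / 1_000_000.0) * (3.00 * 0.1);  // $0.30 (cache reads at 10% of the input rate)
    let total_usd   = input_cost + output_cost + cache_cost;       // $10.80
    let total_jpy   = total_usd * 150.0;                           // ¥1,620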
@@ -194,11 +361,19 @@ impl TokenAnalyzer {

    /// Group records by date (JST timezone)
    pub fn group_by_date(&self, records: &[TokenRecord]) -> Result<HashMap<String, Vec<TokenRecord>>> {
        self.group_by_date_with_mode(records, CostMode::Auto)
    }

    /// Group records by date (JST timezone) with cost mode
    pub fn group_by_date_with_mode(&self, records: &[TokenRecord], _mode: CostMode) -> Result<HashMap<String, Vec<TokenRecord>>> {
        let mut grouped: HashMap<String, Vec<TokenRecord>> = HashMap::new();

        for record in records {
            let date_str = self.extract_date_jst(&record.timestamp)?;
            grouped.entry(date_str).or_insert_with(Vec::new).push(record.clone());
            if let Some(ref timestamp) = record.timestamp {
                if let Ok(date_str) = self.extract_date_jst(timestamp) {
                    grouped.entry(date_str).or_insert_with(Vec::new).push(record.clone());
                }
            }
        }

        Ok(grouped)
@@ -210,13 +385,13 @@ impl TokenAnalyzer {
            return Err(anyhow!("Empty timestamp"));
        }

        // Try to parse various timestamp formats
        // Try to parse various timestamp formats and convert to JST
        let dt = if let Ok(dt) = DateTime::parse_from_rfc3339(timestamp) {
            dt.with_timezone(&chrono_tz::Asia::Tokyo)
            dt.with_timezone(&chrono::FixedOffset::east_opt(9 * 3600).unwrap())
        } else if let Ok(dt) = DateTime::parse_from_str(timestamp, "%Y-%m-%dT%H:%M:%S%.fZ") {
            dt.with_timezone(&chrono_tz::Asia::Tokyo)
            dt.with_timezone(&chrono::FixedOffset::east_opt(9 * 3600).unwrap())
        } else if let Ok(dt) = chrono::DateTime::parse_from_str(timestamp, "%Y-%m-%d %H:%M:%S") {
            dt.with_timezone(&chrono_tz::Asia::Tokyo)
            dt.with_timezone(&chrono::FixedOffset::east_opt(9 * 3600).unwrap())
        } else {
            return Err(anyhow!("Failed to parse timestamp: {}", timestamp));
        };
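Note: the swap from chrono_tz::Asia::Tokyo to a fixed +09:00 offset keeps the date bucketing identical, since JST has no daylight saving. A minimal sketch of the conversion this relies on (the timestamp value is made up):

    use chrono::{DateTime, FixedOffset};

    let jst = FixedOffset::east_opt(9 * 3600).unwrap();
    let dt = DateTime::parse_from_rfc3339("2025-06-09T18:30:00Z").unwrap().with_timezone(&jst);
    // 18:30 UTC on June 9 is already June 10 in JST, so it lands in the next day's bucket.
    assert_eq!(dt.format("%Y-%m-%d").to_string(), "2025-06-10");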
@@ -224,6 +399,59 @@ impl TokenAnalyzer {
        Ok(dt.format("%Y-%m-%d").to_string())
    }

    /// Group records by project path
    pub fn group_by_project(&self, records: &[TokenRecord]) -> Result<HashMap<String, Vec<TokenRecord>>> {
        self.group_by_project_with_mode(records, CostMode::Auto)
    }

    /// Group records by project path with cost mode
    pub fn group_by_project_with_mode(&self, records: &[TokenRecord], _mode: CostMode) -> Result<HashMap<String, Vec<TokenRecord>>> {
        let mut grouped: HashMap<String, Vec<TokenRecord>> = HashMap::new();

        for record in records {
            // Extract project path from cwd field (at top level of JSON)
            let project_path = record.cwd
                .as_ref()
                .unwrap_or(&"Unknown Project".to_string())
                .clone();

            grouped.entry(project_path).or_insert_with(Vec::new).push(record.clone());
        }

        Ok(grouped)
    }

    /// Generate project breakdown with cost mode
    pub fn project_breakdown_with_mode(&self, records: &[TokenRecord], mode: CostMode) -> Result<Vec<ProjectBreakdown>> {
        let grouped = self.group_by_project_with_mode(records, mode)?;
        let mut breakdowns: Vec<ProjectBreakdown> = grouped
            .into_iter()
            .map(|(project_path, project_records)| {
                let project_name = std::path::Path::new(&project_path)
                    .file_name()
                    .and_then(|name| name.to_str())
                    .unwrap_or(&project_path)
                    .to_string();

                ProjectBreakdown {
                    project_path: project_path.clone(),
                    project_name,
                    summary: self.calculate_costs_with_mode(&project_records, mode),
                }
            })
            .collect();

        // Sort by total cost (highest first)
        breakdowns.sort_by(|a, b| b.summary.total_cost_usd.partial_cmp(&a.summary.total_cost_usd).unwrap_or(std::cmp::Ordering::Equal));

        Ok(breakdowns)
    }

    /// Generate project breakdown
    pub fn project_breakdown(&self, records: &[TokenRecord]) -> Result<Vec<ProjectBreakdown>> {
        self.project_breakdown_with_mode(records, CostMode::Auto)
    }

    /// Generate daily breakdown
    pub fn daily_breakdown(&self, records: &[TokenRecord]) -> Result<Vec<DailyBreakdown>> {
        let grouped = self.group_by_date(records)?;
@@ -255,9 +483,11 @@ impl TokenAnalyzer {
        let filtered: Vec<TokenRecord> = records
            .iter()
            .filter(|record| {
                if let Ok(date_str) = self.extract_date_jst(&record.timestamp) {
                    if let Ok(record_date) = chrono::NaiveDate::parse_from_str(&date_str, "%Y-%m-%d") {
                        return record_date.and_hms_opt(0, 0, 0).unwrap() >= cutoff;
                if let Some(ref timestamp) = record.timestamp {
                    if let Ok(date_str) = self.extract_date_jst(timestamp) {
                        if let Ok(record_date) = chrono::NaiveDate::parse_from_str(&date_str, "%Y-%m-%d") {
                            return record_date.and_hms_opt(0, 0, 0).unwrap() >= cutoff;
                        }
                    }
                }
                false
@@ -272,12 +502,13 @@ impl TokenAnalyzer {
/// Handle token-related commands
pub async fn handle_tokens(command: TokenCommands) -> Result<()> {
    match command {
        TokenCommands::Summary { period, claude_dir, details, format } => {
        TokenCommands::Summary { period, claude_dir, details, format, mode } => {
            handle_summary(
                period.unwrap_or_else(|| "week".to_string()),
                claude_dir,
                details,
                format.unwrap_or_else(|| "table".to_string())
                format.unwrap_or_else(|| "table".to_string()),
                mode.unwrap_or_else(|| "auto".to_string())
            ).await
        }
        TokenCommands::Daily { days, claude_dir } => {
@@ -287,16 +518,22 @@ pub async fn handle_tokens(command: TokenCommands) -> Result<()> {
            handle_status(claude_dir).await
        }
        TokenCommands::Analyze { file } => {
            println!("Token analysis for file: {:?} - Not implemented yet", file);
            Ok(())
            handle_analyze_file(file).await
        }
        TokenCommands::Report { days } => {
            println!("Token report for {} days - Not implemented yet", days.unwrap_or(7));
            Ok(())
            handle_duckdb_report(days.unwrap_or(7)).await
        }
        TokenCommands::Cost { month } => {
            println!("Token cost for month: {} - Not implemented yet", month.unwrap_or_else(|| "current".to_string()));
            Ok(())
            handle_duckdb_cost(month).await
        }
        TokenCommands::Projects { period, claude_dir, mode, details, top } => {
            handle_projects(
                period.unwrap_or_else(|| "week".to_string()),
                claude_dir,
                mode.unwrap_or_else(|| "calculate".to_string()),
                details,
                top.unwrap_or(10)
            ).await
        }
    }
}
@@ -307,14 +544,17 @@ async fn handle_summary(
    claude_dir: Option<PathBuf>,
    details: bool,
    format: String,
    mode: String,
) -> Result<()> {
    let analyzer = TokenAnalyzer::new();
    let cost_mode = CostMode::from(mode.as_str());

    // Find Claude data directory
    let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    println!("Loading data from: {}", data_dir.display());
    println!("Cost calculation mode: {:?}", cost_mode);

    // Parse records
    let all_records = analyzer.parse_jsonl_files(&data_dir)?;
@@ -322,6 +562,11 @@ async fn handle_summary(
        println!("No token usage data found");
        return Ok(());
    }

    println!("Debug: Found {} total records", all_records.len());
    if let Some(latest) = all_records.iter().filter_map(|r| r.timestamp.as_ref()).max() {
        println!("Debug: Latest timestamp: {}", latest);
    }

    // Filter by period
    let filtered_records = analyzer.filter_by_period(&all_records, &period)?;
@@ -330,8 +575,8 @@ async fn handle_summary(
        return Ok(());
    }

    // Calculate summary
    let summary = analyzer.calculate_costs(&filtered_records);
    // Calculate summary with specified mode
    let summary = analyzer.calculate_costs_with_mode(&filtered_records, cost_mode);

    // Output results
    match format.as_str() {
@@ -339,7 +584,7 @@ async fn handle_summary(
            println!("{}", serde_json::to_string_pretty(&summary)?);
        }
        "table" | _ => {
            print_summary_table(&summary, &period, details);
            print_summary_table_with_mode(&summary, &period, details, &cost_mode);
        }
    }
@@ -405,22 +650,124 @@ async fn handle_status(claude_dir: Option<PathBuf>) -> Result<()> {
    Ok(())
}

/// Handle projects command
async fn handle_projects(
    period: String,
    claude_dir: Option<PathBuf>,
    mode: String,
    details: bool,
    top: u32,
) -> Result<()> {
    let analyzer = TokenAnalyzer::new();
    let cost_mode = CostMode::from(mode.as_str());

    // Find Claude data directory
    let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    println!("Loading data from: {}", data_dir.display());
    println!("Cost calculation mode: {:?}", cost_mode);

    // Parse records
    let all_records = analyzer.parse_jsonl_files(&data_dir)?;
    if all_records.is_empty() {
        println!("No token usage data found");
        return Ok(());
    }

    println!("Debug: Found {} total records", all_records.len());

    // Filter by period
    let filtered_records = analyzer.filter_by_period(&all_records, &period)?;
    if filtered_records.is_empty() {
        println!("No data found for period: {}", period);
        return Ok(());
    }

    // Generate project breakdown
    let project_breakdown = analyzer.project_breakdown_with_mode(&filtered_records, cost_mode)?;
    let limited_breakdown: Vec<_> = project_breakdown.into_iter().take(top as usize).collect();

    // Print project breakdown
    print_project_breakdown(&limited_breakdown, &period, details, &cost_mode);

    Ok(())
}

/// Print project breakdown
fn print_project_breakdown(breakdown: &[ProjectBreakdown], period: &str, details: bool, mode: &CostMode) {
    println!("\n=== Claude Code Token Usage by Project ({}) ===", period);
    println!();

    for (i, project) in breakdown.iter().enumerate() {
        println!("{}. 📁 {} ({})",
            i + 1,
            project.project_name,
            if project.project_path.len() > 50 {
                format!("...{}", &project.project_path[project.project_path.len()-47..])
            } else {
                project.project_path.clone()
            }
        );

        println!(" 📊 Tokens: {} total", format_number(project.summary.total_tokens));

        if details {
            println!(" • Input: {:>12}", format_number(project.summary.input_tokens));
            println!(" • Output: {:>12}", format_number(project.summary.output_tokens));
            println!(" • Cache create: {:>12}", format_number(project.summary.cache_creation_tokens));
            println!(" • Cache read: {:>12}", format_number(project.summary.cache_read_tokens));
        }

        println!(" 💰 Cost: ${:.4} USD (¥{:.0} JPY)",
            project.summary.total_cost_usd,
            project.summary.total_cost_jpy);

        if details {
            println!(" • Records: {}", project.summary.record_count);
        }

        println!();
    }

    if breakdown.len() > 1 {
        let total_tokens: u64 = breakdown.iter().map(|p| p.summary.total_tokens).sum();
        let total_cost: f64 = breakdown.iter().map(|p| p.summary.total_cost_usd).sum();

        println!("📈 Summary:");
        println!(" Total tokens: {}", format_number(total_tokens));
        println!(" Total cost: ${:.4} USD (¥{:.0} JPY)", total_cost, total_cost * 150.0);
        println!(" Projects shown: {}", breakdown.len());
        println!();
    }

    println!("💡 Cost calculation (Mode: {:?})", mode);
}

/// Print summary table
fn print_summary_table(summary: &CostSummary, period: &str, details: bool) {
    print_summary_table_with_mode(summary, period, details, &CostMode::Auto)
}

/// Print summary table with cost mode information
fn print_summary_table_with_mode(summary: &CostSummary, period: &str, details: bool, mode: &CostMode) {
    println!("\n=== Claude Code Token Usage Summary ({}) ===", period);
    println!();

    println!("📊 Token Usage:");
    println!(" Input tokens: {:>12}", format_number(summary.input_tokens));
    println!(" Output tokens: {:>12}", format_number(summary.output_tokens));
    println!(" Total tokens: {:>12}", format_number(summary.total_tokens));
    println!(" Input tokens: {:>12}", format_number(summary.input_tokens));
    println!(" Output tokens: {:>12}", format_number(summary.output_tokens));
    println!(" Cache creation: {:>12}", format_number(summary.cache_creation_tokens));
    println!(" Cache read: {:>12}", format_number(summary.cache_read_tokens));
    println!(" Total tokens: {:>12}", format_number(summary.total_tokens));
    println!();

    println!("💰 Cost Estimation:");
    println!(" Input cost: {:>12}", format!("${:.4} USD", summary.input_cost_usd));
    println!(" Output cost: {:>12}", format!("${:.4} USD", summary.output_cost_usd));
    println!(" Total cost: {:>12}", format!("${:.4} USD", summary.total_cost_usd));
    println!(" Total cost: {:>12}", format!("¥{:.0} JPY", summary.total_cost_jpy));
    println!(" Input cost: {:>12}", format!("${:.4} USD", summary.input_cost_usd));
    println!(" Output cost: {:>12}", format!("${:.4} USD", summary.output_cost_usd));
    println!(" Cache cost: {:>12}", format!("${:.4} USD", summary.cache_cost_usd));
    println!(" Total cost: {:>12}", format!("${:.4} USD", summary.total_cost_usd));
    println!(" Total cost: {:>12}", format!("¥{:.0} JPY", summary.total_cost_jpy));
    println!();

    if details {
@@ -431,9 +778,23 @@ fn print_summary_table(summary: &CostSummary, period: &str, details: bool) {
        println!();
    }

    println!("💡 Cost calculation based on:");
    println!(" Input: $3.00 per 1M tokens");
    println!(" Output: $15.00 per 1M tokens");
    println!("💡 Cost calculation (Mode: {:?}):", mode);
    match mode {
        CostMode::Display => {
            println!(" Using pre-calculated costUSD values only");
            println!(" Missing costs shown as $0.00");
        }
        CostMode::Calculate => {
            println!(" Input: $3.00 per 1M tokens");
            println!(" Output: $15.00 per 1M tokens");
            println!(" Ignoring pre-calculated costUSD values");
        }
        CostMode::Auto => {
            println!(" Input: $3.00 per 1M tokens");
            println!(" Output: $15.00 per 1M tokens");
            println!(" Using costUSD when available, tokens otherwise");
        }
    }
    println!(" USD to JPY: 150.0");
}
@@ -468,6 +829,239 @@ fn format_number(n: u64) -> String {
    result.chars().rev().collect()
}

/// Handle DuckDB-based token report (daily breakdown)
async fn handle_duckdb_report(days: u32) -> Result<()> {
    if !check_duckdb_available() {
        print_duckdb_help();
        return Err(anyhow!("DuckDB is not available"));
    }

    let claude_dir = TokenAnalyzer::find_claude_data_dir()
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");
    println!("\x1b[1;36m Claude Code トークン使用状況レポート \x1b[0m");
    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");
    println!();

    let duckdb_query = format!(r#"
        SELECT
            日付,
            入力トークン,
            出力トークン,
            合計トークン,
            料金
        FROM (
            SELECT
                strftime(DATE(timestamp::TIMESTAMP AT TIME ZONE 'UTC' AT TIME ZONE 'Asia/Tokyo'), '%Y年%m月%d日') AS 日付,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'input_tokens' AS INTEGER))), 12, ' ') AS 入力トークン,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'output_tokens' AS INTEGER))), 12, ' ') AS 出力トークン,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'input_tokens' AS INTEGER) + CAST(message -> 'usage' ->> 'output_tokens' AS INTEGER))), 12, ' ') AS 合計トークン,
                LPAD(FORMAT('¥{{:,}}', CAST(ROUND(SUM(costUSD) * 150, 0) AS INTEGER)), 10, ' ') AS 料金,
                DATE(timestamp::TIMESTAMP AT TIME ZONE 'UTC' AT TIME ZONE 'Asia/Tokyo') as sort_date
            FROM read_json('{}/**/*.jsonl')
            WHERE timestamp IS NOT NULL
            GROUP BY DATE(timestamp::TIMESTAMP AT TIME ZONE 'UTC' AT TIME ZONE 'Asia/Tokyo')

            UNION ALL

            SELECT
                '────────────────' AS 日付,
                '────────────' AS 入力トークン,
                '────────────' AS 出力トークン,
                '────────────' AS 合計トークン,
                '──────────' AS 料金,
                '9999-12-30' as sort_date

            UNION ALL

            SELECT
                '【合計】' AS 日付,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'input_tokens' AS INTEGER))), 12, ' ') AS 入力トークン,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'output_tokens' AS INTEGER))), 12, ' ') AS 出力トークン,
                LPAD(FORMAT('{{:,}}', SUM(CAST(message -> 'usage' ->> 'input_tokens' AS INTEGER) + CAST(message -> 'usage' ->> 'output_tokens' AS INTEGER))), 12, ' ') AS 合計トークン,
                LPAD(FORMAT('¥{{:,}}', CAST(ROUND(SUM(costUSD) * 150, 0) AS INTEGER)), 10, ' ') AS 料金,
                '9999-12-31' as sort_date
            FROM read_json('{}/**/*.jsonl')
            WHERE timestamp IS NOT NULL
        )
        ORDER BY sort_date DESC NULLS LAST
        LIMIT {};
    "#, claude_dir.display(), claude_dir.display(), days + 2);

    let output = Command::new("duckdb")
        .arg("-c")
        .arg(&duckdb_query)
        .output()?;

    if output.status.success() {
        println!("{}", String::from_utf8_lossy(&output.stdout));
    } else {
        println!("Error running DuckDB query:");
        println!("{}", String::from_utf8_lossy(&output.stderr));
    }

    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");

    Ok(())
}
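Note: two details of the report generation above are easy to miss. The LIMIT is days + 2 because the UNION adds one separator row and one 【合計】 (total) row on top of the per-day rows. And the whole query is handed to the external duckdb binary through its -c flag, exactly as Command::new("duckdb").arg("-c") does, so the same SQL can be pasted into an interactive duckdb session when debugging the output.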

/// Handle DuckDB-based cost analysis (session breakdown)
async fn handle_duckdb_cost(month: Option<String>) -> Result<()> {
    if !check_duckdb_available() {
        print_duckdb_help();
        return Err(anyhow!("DuckDB is not available"));
    }

    let claude_dir = TokenAnalyzer::find_claude_data_dir()
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    let date_filter = match month.as_deref() {
        Some("today") => "CURRENT_DATE".to_string(),
        Some(date) if date.contains('-') => format!("'{}'", date),
        Some("current") | None => "CURRENT_DATE".to_string(),
        Some(month_str) => {
            // Try to parse as YYYY-MM format
            if month_str.len() == 7 && month_str.contains('-') {
                format!("'{}-01'", month_str)
            } else {
                "CURRENT_DATE".to_string()
            }
        }
    };

    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");
    println!("\x1b[1;36m Claude Code 本日のセッション一覧 \x1b[0m");
    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");
    println!();

    let duckdb_query = format!(r#"
        WITH session_stats AS (
            SELECT
                sessionId,
                MIN(timestamp)::TIMESTAMP as session_start,
                MAX(timestamp)::TIMESTAMP as session_end,
                COUNT(DISTINCT CASE WHEN type = 'user' THEN uuid END) as user_messages,
                COUNT(DISTINCT CASE WHEN type = 'assistant' THEN uuid END) as assistant_messages,
                SUM(CASE WHEN type = 'assistant' AND json_extract(message, '$.usage.input_tokens') IS NOT NULL THEN CAST(json_extract(message, '$.usage.input_tokens') AS INTEGER) ELSE 0 END) as total_input_tokens,
                SUM(CASE WHEN type = 'assistant' AND json_extract(message, '$.usage.output_tokens') IS NOT NULL THEN CAST(json_extract(message, '$.usage.output_tokens') AS INTEGER) ELSE 0 END) as total_output_tokens,
                SUM(CASE WHEN type = 'assistant' AND costUSD IS NOT NULL THEN costUSD ELSE 0 END) as total_cost
            FROM read_json('{}/**/*.jsonl')
            WHERE type IN ('user', 'assistant')
            AND sessionId IS NOT NULL
            GROUP BY sessionId
        ),
        today_sessions AS (
            SELECT * FROM session_stats s
            WHERE DATE(s.session_start AT TIME ZONE 'UTC' AT TIME ZONE 'Asia/Tokyo') = {}
        )
        SELECT
            ID,
            開始時刻,
            時間,
            メッセージ数,
            料金,
            概要
        FROM (
            SELECT
                SUBSTR(CAST(s.sessionId AS VARCHAR), 1, 8) || '...' as ID,
                STRFTIME((s.session_start AT TIME ZONE 'UTC' AT TIME ZONE 'Asia/Tokyo'), '%m/%d %H:%M') as 開始時刻,
                LPAD(CAST(ROUND(EXTRACT(EPOCH FROM (s.session_end - s.session_start)) / 60, 0) AS INTEGER) || '分', 5, ' ') as 時間,
                LPAD(CAST(s.user_messages AS VARCHAR), 6, ' ') as メッセージ数,
                LPAD(FORMAT('¥{{:,}}', CAST(ROUND(s.total_cost * 150, 0) AS INTEGER)), 8, ' ') as 料金,
                '本日のセッション' as 概要,
                s.session_start as sort_key
            FROM today_sessions s

            UNION ALL

            SELECT
                '────────────' as ID,
                '────────────' as 開始時刻,
                '─────' as 時間,
                '──────' as メッセージ数,
                '────────' as 料金,
                '────────────────────────────────────────────────────────────' as 概要,
                '9999-12-31'::TIMESTAMP as sort_key

            UNION ALL

            SELECT
                '【合計】' as ID,
                CAST(COUNT(*) AS VARCHAR) || '件' as 開始時刻,
                LPAD(CAST(SUM(ROUND(EXTRACT(EPOCH FROM (session_end - session_start)) / 60, 0)) AS INTEGER) || '分', 5, ' ') as 時間,
                LPAD(CAST(SUM(user_messages) AS VARCHAR), 6, ' ') as メッセージ数,
                LPAD(FORMAT('¥{{:,}}', CAST(ROUND(SUM(total_cost) * 150, 0) AS INTEGER)), 8, ' ') as 料金,
                '本日の合計' as 概要,
                '9999-12-31'::TIMESTAMP as sort_key
            FROM today_sessions
        )
        ORDER BY sort_key DESC;
    "#, claude_dir.display(), date_filter);

    let output = Command::new("duckdb")
        .arg("-c")
        .arg(&duckdb_query)
        .output()?;

    if output.status.success() {
        println!("{}", String::from_utf8_lossy(&output.stdout));
    } else {
        println!("Error running DuckDB query:");
        println!("{}", String::from_utf8_lossy(&output.stderr));
    }

    println!("\x1b[1;34m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m");

    Ok(())
}

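Note on the month argument handling above: because the guard Some(date) if date.contains('-') is checked before the length-7 YYYY-MM branch, any value containing a hyphen (including a bare YYYY-MM) is quoted as-is, so the format!("'{}-01'") arm is effectively unreachable as written; 'today', 'current', and a missing value all resolve to CURRENT_DATE. The resulting string is spliced into the query as the JST date that today_sessions is filtered on.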
/// Handle analyze file command
async fn handle_analyze_file(file: PathBuf) -> Result<()> {
    let analyzer = TokenAnalyzer::new();

    if !file.exists() {
        return Err(anyhow!("File does not exist: {}", file.display()));
    }

    println!("Analyzing file: {}", file.display());

    let records = analyzer.parse_jsonl_file(&file)?;
    if records.is_empty() {
        println!("No token usage records found in file");
        return Ok(());
    }

    let summary = analyzer.calculate_costs(&records);
    print_summary_table(&summary, &format!("File: {}", file.file_name().unwrap_or_default().to_string_lossy()), true);

    Ok(())
}

/// Check if DuckDB is available on the system
fn check_duckdb_available() -> bool {
    Command::new("duckdb")
        .arg("--version")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}

/// Print DuckDB installation help
pub fn print_duckdb_help() {
    println!("\n🦆 DuckDB is required for advanced token analysis features!");
    println!();
    println!("📦 Installation:");
    println!(" macOS: brew install duckdb");
    println!(" Windows: Download from https://duckdb.org/docs/installation/");
    println!(" Linux: apt install duckdb (or download from website)");
    println!();
    println!("🚀 After installation, try:");
    println!(" aigpt tokens report --days 7");
    println!(" aigpt tokens cost --month today");
    println!();
}

#[cfg(test)]
mod tests {
    use super::*;
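Note: the diff view is cut off inside the test module. As a sketch of the kind of unit test that fits here, exercising only behavior visible above (TokenAnalyzer::new and the &str -> Result<String> shape of extract_date_jst are taken from the calls earlier in this file; the timestamp value is invented):

    #[test]
    fn extract_date_jst_converts_utc_to_jst_date() {
        let analyzer = TokenAnalyzer::new();
        // 20:00 UTC on Jan 1 is 05:00 on Jan 2 in JST (+09:00).
        let date = analyzer.extract_date_jst("2025-01-01T20:00:00Z").unwrap();
        assert_eq!(date, "2025-01-02");
        // An empty timestamp is rejected.
        assert!(analyzer.extract_date_jst("").is_err());
    }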