diff --git a/.claude/settings.local.json b/.claude/settings.local.json
index e3b71ef..65d81cf 100644
--- a/.claude/settings.local.json
+++ b/.claude/settings.local.json
@@ -50,7 +50,9 @@
       "Bash(sudo lsof:*)",
       "Bash(sudo:*)",
       "Bash(cargo check:*)",
-      "Bash(cargo run:*)"
+      "Bash(cargo run:*)",
+      "Bash(cargo test:*)",
+      "Bash(diff:*)"
     ],
     "deny": []
   }
diff --git a/Cargo.toml b/Cargo.toml
index c3a32b5..34da612 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,10 +9,6 @@ authors = ["syui"]
 name = "aigpt"
 path = "src/main.rs"
 
-[[bin]]
-name = "test-config"
-path = "src/bin/test_config.rs"
-
 [dependencies]
 clap = { version = "4.0", features = ["derive"] }
 serde = { version = "1.0", features = ["derive"] }
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
deleted file mode 100644
index b6076bb..0000000
--- a/DEVELOPMENT.md
+++ /dev/null
@@ -1,115 +0,0 @@
-# ai.gpt プロジェクト固有情報
-
-## プロジェクト概要
-- **名前**: ai.gpt
-- **パッケージ**: aigpt
-- **タイプ**: 自律的送信AI + 統合MCP基盤
-- **役割**: 記憶・関係性・開発支援の統合AIシステム
-
-## 実装完了状況
-
-### 🧠 記憶システム(MemoryManager)
-- **階層的記憶**: 完全ログ→AI要約→コア記憶→選択的忘却
-- **文脈検索**: キーワード・意味的検索
-- **記憶要約**: AI駆動自動要約機能
-
-### 🤝 関係性システム(RelationshipTracker)
-- **不可逆性**: 現実の人間関係と同じ重み
-- **時間減衰**: 自然な関係性変化
-- **送信判定**: 関係性閾値による自発的コミュニケーション
-
-### 🎭 人格システム(Persona)
-- **AI運勢**: 1-10ランダム値による日々の人格変動
-- **統合管理**: 記憶・関係性・運勢の統合判断
-- **継続性**: 長期記憶による人格継承
-
-### 💻 ai.shell統合(Claude Code機能)
-- **インタラクティブ環境**: `aigpt shell`
-- **開発支援**: ファイル分析・コード生成・プロジェクト管理
-- **継続開発**: プロジェクト文脈保持
-
-## MCP Server統合(23ツール)
-
-### 🧠 Memory System(5ツール)
-- get_memories, get_contextual_memories, search_memories
-- create_summary, create_core_memory
-
-### 🤝 Relationships(4ツール)
-- get_relationship, get_all_relationships
-- process_interaction, check_transmission_eligibility
-
-### 💻 Shell Integration(5ツール)
-- execute_command, analyze_file, write_file
-- read_project_file, list_files
-
-### 🔒 Remote Execution(4ツール)
-- remote_shell, ai_bot_status
-- isolated_python, isolated_analysis
-
-### ⚙️ System State(3ツール)
-- get_persona_state, get_fortune, run_maintenance
-
-### 🎴 ai.card連携(6ツール + 独立MCPサーバー)
-- card_draw_card, card_get_user_cards, card_analyze_collection
-- **独立サーバー**: FastAPI + MCP (port 8000)
-
-### 📝 ai.log連携(8ツール + Rustサーバー)
-- log_create_post, log_ai_content, log_translate_document
-- **独立サーバー**: Rust製 (port 8002)
-
-## 開発環境・設定
-
-### 環境構築
-```bash
-cd /Users/syui/ai/gpt
-./setup_venv.sh
-source ~/.config/syui/ai/gpt/venv/bin/activate
-```
-
-### 設定管理
-- **メイン設定**: `/Users/syui/ai/gpt/config.json`
-- **データディレクトリ**: `~/.config/syui/ai/gpt/`
-- **仮想環境**: `~/.config/syui/ai/gpt/venv/`
-
-### 使用方法
-```bash
-# ai.shell起動
-aigpt shell --model qwen2.5-coder:latest --provider ollama
-
-# MCPサーバー起動
-aigpt server --port 8001
-
-# 記憶システム体験
-aigpt chat syui "質問内容" --provider ollama --model qwen3:latest
-```
-
-## 技術アーキテクチャ
-
-### 統合構成
-```
-ai.gpt (統合MCPサーバー:8001)
-├── 🧠 ai.gpt core (記憶・関係性・人格)
-├── 💻 ai.shell (Claude Code風開発環境)
-├── 🎴 ai.card (独立MCPサーバー:8000)
-└── 📝 ai.log (Rust製ブログシステム:8002)
-```
-
-### 今後の展開
-- **自律送信**: atproto実装による真の自発的コミュニケーション
-- **ai.ai連携**: 心理分析AIとの統合
-- **ai.verse統合**: UEメタバースとの連携
-- **分散SNS統合**: atproto完全対応
-
-## 革新的な特徴
-
-### AI駆動記憶システム
-- ChatGPT 4,000件ログから学習した効果的記憶構築
-- 人間的な忘却・重要度判定
-
-### 不可逆関係性
-- 現実の人間関係と同じ重みを持つAI関係性
-- 修復不可能な関係性破綻システム
-
-### 統合アーキテクチャ
-- fastapi_mcp基盤での複数AIシステム統合
-- OpenAI Function Calling + MCP完全連携実証済み
\ No newline at end of file
diff --git a/MIGRATION_STATUS.md b/MIGRATION_STATUS.md
deleted file mode 100644
index 2b2edb7..0000000
--- a/MIGRATION_STATUS.md
+++ /dev/null
@@ -1,324 +0,0 @@
-# ai.gpt Python to Rust Migration Status
-
-This document tracks the progress of migrating ai.gpt from Python to Rust using the MCP Rust SDK.
-
-## Migration Strategy
-
-We're implementing a step-by-step migration approach, comparing each Python command with the Rust implementation to ensure feature parity.
-
-### Current Status: Phase 9 - Final Implementation (15/16 complete)
-
-## Command Implementation Status
-
-| Command | Python Status | Rust Status | Notes |
-|---------|---------------|-------------|-------|
-| **chat** | ✅ Complete | ✅ Complete | AI providers (Ollama/OpenAI) + memory + relationships + fallback |
-| **status** | ✅ Complete | ✅ Complete | Personality, fortune, and relationship display |
-| **fortune** | ✅ Complete | ✅ Complete | Fortune calculation and display |
-| **relationships** | ✅ Complete | ✅ Complete | Relationship listing with status tracking |
-| **transmit** | ✅ Complete | ✅ Complete | Autonomous/breakthrough/maintenance transmission logic |
-| **maintenance** | ✅ Complete | ✅ Complete | Daily maintenance + relationship time decay |
-| **server** | ✅ Complete | ✅ Complete | MCP server with 9 tools, configuration display |
-| **schedule** | ✅ Complete | ✅ Complete | Automated task scheduling with execution history |
-| **shell** | ✅ Complete | ✅ Complete | Interactive shell mode with AI integration |
-| **config** | ✅ Complete | 🟡 Basic | Basic config structure only |
-| **import-chatgpt** | ✅ Complete | ✅ Complete | ChatGPT data import with memory integration |
-| **conversation** | ✅ Complete | ❌ Not started | Continuous conversation mode |
-| **conv** | ✅ Complete | ❌ Not started | Alias for conversation |
-| **docs** | ✅ Complete | ✅ Complete | Documentation management with project discovery and AI enhancement |
-| **submodules** | ✅ Complete | ✅ Complete | Submodule management with update, list, and status functionality |
-| **tokens** | ✅ Complete | ❌ Not started | Token cost analysis |
-
-### Legend
-- ✅ Complete: Full feature parity with Python version
-- 🟡 Basic: Core functionality implemented, missing advanced features
-- ❌ Not started: Not yet implemented
-
-## Data Structure Implementation Status
-
-| Component | Python Status | Rust Status | Notes |
-|-----------|---------------|-------------|-------|
-| **Config** | ✅ Complete | ✅ Complete | Data directory management, provider configs |
-| **Persona** | ✅ Complete | ✅ Complete | Memory & relationship integration, sentiment analysis |
-| **MemoryManager** | ✅ Complete | ✅ Complete | Hierarchical memory system with JSON persistence |
-| **RelationshipTracker** | ✅ Complete | ✅ Complete | Time decay, scoring, transmission eligibility |
-| **FortuneSystem** | ✅ Complete | ✅ Complete | Daily fortune calculation |
-| **TransmissionController** | ✅ Complete | ✅ Complete | Autonomous/breakthrough/maintenance transmission |
-| **AIProvider** | ✅ Complete | ✅ Complete | OpenAI and Ollama support with fallback |
-| **AIScheduler** | ✅ Complete | ✅ Complete | Automated task scheduling with JSON persistence |
-| **MCPServer** | ✅ Complete | ✅ Complete | MCP server with 9 tools and request handling |
-
-## Architecture Comparison
-
-### Python Implementation (Current)
-```
-├── persona.py          # Core personality system
-├── memory.py           # Hierarchical memory management
-├── relationship.py     # Relationship tracking with time decay
-├── fortune.py          # Daily fortune system
-├── transmission.py     # Autonomous transmission logic
-├── scheduler.py        # Task scheduling system
-├── mcp_server.py       # MCP server with 9 tools
-├── ai_provider.py      # AI provider abstraction
-├── config.py           # Configuration management
-├── cli.py              # CLI interface (typer)
-└── commands/           # Command modules
-    ├── docs.py
-    ├── submodules.py
-    └── tokens.py
-```
-
-### Rust Implementation (Current)
-```
-├── main.rs             # CLI entry point (clap) ✅
-├── persona.rs          # Core personality system ✅
-├── config.rs           # Configuration management ✅
-├── status.rs           # Status command implementation ✅
-├── cli.rs              # Command handlers ✅
-├── memory.rs           # Memory management ✅
-├── relationship.rs     # Relationship tracking ✅
-├── fortune.rs          # Fortune system (embedded in persona) ✅
-├── transmission.rs     # Transmission logic ✅
-├── scheduler.rs        # Task scheduling ✅
-├── mcp_server.rs       # MCP server ✅
-├── ai_provider.rs      # AI provider abstraction ✅
-└── commands/           # Command modules ❌
-    ├── docs.rs
-    ├── submodules.rs
-    └── tokens.rs
-```
-
-## Phase Implementation Plan
-
-### Phase 1: Core Commands ✅ (Completed)
-- [x] Basic CLI structure with clap
-- [x] Config system foundation
-- [x] Persona basic structure
-- [x] Status command (personality + fortune)
-- [x] Fortune command
-- [x] Relationships command (basic listing)
-- [x] Chat command (echo response)
-
-### Phase 2: Data Systems ✅ (Completed)
-- [x] MemoryManager with hierarchical storage
-- [x] RelationshipTracker with time decay
-- [x] Proper JSON persistence
-- [x] Configuration management expansion
-- [x] Sentiment analysis integration
-- [x] Memory-relationship integration
-
-### Phase 3: AI Integration ✅ (Completed)
-- [x] AI provider abstraction (OpenAI/Ollama)
-- [x] Chat command with real AI responses
-- [x] Fallback system when AI fails
-- [x] Dynamic system prompts based on personality
-
-### Phase 4: Advanced Features ✅ (Completed)
-- [x] TransmissionController (autonomous/breakthrough/maintenance)
-- [x] Transmission logging and statistics
-- [x] Relationship-based transmission eligibility
-- [x] AIScheduler (automated task execution with intervals)
-- [x] Task management (create/enable/disable/delete tasks)
-- [x] Execution history and statistics
-
-### Phase 5: MCP Server Implementation ✅ (Completed)
-- [x] MCPServer with 9 tools
-- [x] Tool definitions with JSON schemas
-- [x] Request/response handling system
-- [x] Integration with all core systems
-- [x] Server command and CLI integration
-
-### Phase 6: Interactive Shell Mode ✅ (Completed)
-- [x] Interactive shell implementation
-- [x] Command parsing and execution
-- [x] Shell command execution (!commands)
-- [x] Slash command support (/commands)
-- [x] AI conversation integration
-- [x] Help system and command history
-- [x] Shell history persistence
-
-### Phase 7: Import/Export Functionality ✅ (Completed)
-- [x] ChatGPT JSON import support
-- [x] Memory integration with proper importance scoring
-- [x] Relationship tracking for imported conversations
-- [x] Timestamp conversion and validation
-- [x] Error handling and progress reporting
-
-### Phase 8: Documentation Management ✅ (Completed)
-- [x] Documentation generation with AI enhancement
-- [x] Project discovery from ai root directory
-- [x] Documentation sync functionality
-- [x] Status and listing commands
-- [x] Integration with ai ecosystem structure
-
-### Phase 9: Submodule Management ✅ (Completed)
-- [x] Submodule listing with status information
-- [x] Submodule update functionality with dry-run support
-- [x] Automatic commit generation for updates
-- [x] Git integration for submodule operations
-- [x] Status overview with comprehensive statistics
-
-### Phase 10: Final Features
-- [ ] Token analysis tools
-
-## Current Test Results
-
-### Rust Implementation
-```bash
-$ cargo run -- status test-user
-ai.gpt Status
-Mood: Contemplative
-Fortune: 1/10
-
-Current Personality
-analytical: 0.90
-curiosity: 0.70
-creativity: 0.60
-empathy: 0.80
-emotional: 0.40
-
-Relationship with: test-user
-Status: new
-Score: 0.00
-Total Interactions: 2
-Transmission Enabled: false
-
-# Simple fallback response (no AI provider)
-$ cargo run -- chat test-user "Hello, this is great!"
-User: Hello, this is great!
-AI: I understand your message: 'Hello, this is great!'
-(+0.50 relationship)
-
-Relationship Status: new
-Score: 0.50 / 10
-Transmission: ✗ Disabled
-
-# AI-powered response (with provider)
-$ cargo run -- chat test-user "Hello!" --provider ollama --model llama2
-User: Hello!
-AI: [Attempts AI response, falls back to simple if provider unavailable]
-
-Relationship Status: new
-Score: 0.00 / 10
-Transmission: ✗ Disabled
-
-# Autonomous transmission system
-$ cargo run -- transmit
-🚀 Checking for autonomous transmissions...
-No transmissions needed at this time.
-
-# Daily maintenance
-$ cargo run -- maintenance
-🔧 Running daily maintenance...
-✓ Applied relationship time decay
-✓ No maintenance transmissions needed
-
-📊 Relationship Statistics:
-Total: 1 | Active: 1 | Transmission Enabled: 0 | Broken: 0
-Average Score: 0.00
-
-✅ Daily maintenance completed!
-
-# Automated task scheduling
-$ cargo run -- schedule
-⏰ Running scheduled tasks...
-No scheduled tasks due at this time.
-
-📊 Scheduler Statistics:
-Total Tasks: 4 | Enabled: 4 | Due: 0
-Executions: 0 | Today: 0 | Success Rate: 0.0%
-Average Duration: 0.0ms
-
-📅 Upcoming Tasks:
-  06-07 02:24 breakthrough_check (29m)
-  06-07 02:54 auto_transmission (59m)
-  06-07 03:00 daily_maintenance (1h 5m)
-  06-07 12:00 maintenance_transmission (10h 5m)
-
-⏰ Scheduler check completed!
-
-# MCP Server functionality
-$ cargo run -- server
-🚀 Starting ai.gpt MCP Server...
-🚀 Starting MCP Server on port 8080
-📋 Available tools: 9
-  - get_status: Get AI status including mood, fortune, and personality
-  - chat_with_ai: Send a message to the AI and get a response
-  - get_relationships: Get all relationships and their statuses
-  - get_memories: Get memories for a specific user
-  - check_transmissions: Check and execute autonomous transmissions
-  - run_maintenance: Run daily maintenance tasks
-  - run_scheduler: Run scheduled tasks
-  - get_scheduler_status: Get scheduler statistics and upcoming tasks
-  - get_transmission_history: Get recent transmission history
-✅ MCP Server ready for requests
-
-📋 Available MCP Tools:
-1. get_status - Get AI status including mood, fortune, and personality
-2. chat_with_ai - Send a message to the AI and get a response
-3. get_relationships - Get all relationships and their statuses
-4. get_memories - Get memories for a specific user
-5. check_transmissions - Check and execute autonomous transmissions
-6. run_maintenance - Run daily maintenance tasks
-7. run_scheduler - Run scheduled tasks
-8. get_scheduler_status - Get scheduler statistics and upcoming tasks
-9. get_transmission_history - Get recent transmission history
-
-🔧 Server Configuration:
-Port: 8080
-Tools: 9
-Protocol: MCP (Model Context Protocol)
-
-✅ MCP Server is ready to accept requests
-```
-
-### Python Implementation
-```bash
-$ uv run aigpt status
-ai.gpt Status
-Mood: cheerful
-Fortune: 6/10
-Current Personality
-Curiosity │ 0.70
-Empathy │ 0.70
-Creativity │ 0.48
-Patience │ 0.66
-Optimism │ 0.36
-```
-
-## Key Differences to Address
-
-1. **Fortune Calculation**: Different algorithms producing different values
-2. **Personality Traits**: Different trait sets and values
-3. **Presentation**: Rich formatting vs simple text output
-4. **Data Persistence**: Need to ensure compatibility with existing Python data
-
-## Next Priority
-
-Based on our current progress, the next priority should be:
-
-1. **Interactive Shell Mode**: Continuous conversation mode implementation
-2. **Import/Export Features**: ChatGPT data import and conversation export
-3. **Command Modules**: docs, submodules, tokens commands
-4. **Configuration Management**: Advanced config command functionality
-
-## Technical Notes
-
-- **Dependencies**: Using clap for CLI, serde for JSON, tokio for async, anyhow for errors
-- **Data Directory**: Following same path as Python (`~/.config/syui/ai/gpt/`)
-- **File Compatibility**: JSON format should be compatible between implementations
-- **MCP Integration**: Will use Rust MCP SDK when ready for Phase 4
-
-## Migration Validation
-
-To validate migration success, we need to ensure:
-- [ ] Same data directory structure
-- [ ] Compatible JSON file formats
-- [ ] Identical command-line interface
-- [ ] Equivalent functionality and behavior
-- [ ] Performance improvements from Rust implementation
-
----
-
-*Last updated: 2025-01-06*
-*Current phase: Phase 9 - Submodule Management (15/16 complete)*
\ No newline at end of file
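The migration-validation checklist in the deleted notes above hinges on the two implementations sharing data files. A minimal sketch of what a Rust-side compatibility probe could look like, assuming the `serde_json` crate and the `~/.config/syui/ai/gpt/` data path named in those notes; `relationships.json` is used here only as an illustrative file name:

```rust
// Minimal compatibility probe: read a JSON file written by the Python
// implementation and confirm it still parses on the Rust side.
// The exact path and file name are assumptions taken from the notes above.
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let home = std::env::var("HOME")?;
    let path = format!("{home}/.config/syui/ai/gpt/relationships.json");
    let raw = fs::read_to_string(&path)?;
    // Parse into an untyped value first; a typed struct can follow once
    // the Python-side schema is pinned down.
    let value: serde_json::Value = serde_json::from_str(&raw)?;
    println!("parsed {} top-level entries", value.as_object().map_or(0, |o| o.len()));
    Ok(())
}
```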
diff --git a/README.md b/README.md
index b6076bb..290400c 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
-# ai.gpt プロジェクト固有情報
+# ai.gpt
 
 ## プロジェクト概要
 - **名前**: ai.gpt
-- **パッケージ**: aigpt
+- **パッケージ**: aigpt
+- **言語**: Rust (完全移行済み)
 - **タイプ**: 自律的送信AI + 統合MCP基盤
 - **役割**: 記憶・関係性・開発支援の統合AIシステム
 
@@ -28,48 +29,42 @@
 - **開発支援**: ファイル分析・コード生成・プロジェクト管理
 - **継続開発**: プロジェクト文脈保持
 
-## MCP Server統合(23ツール)
+## MCP Server統合(17ツール)
 
 ### 🧠 Memory System(5ツール)
 - get_memories, get_contextual_memories, search_memories
 - create_summary, create_core_memory
 
 ### 🤝 Relationships(4ツール)
-- get_relationship, get_all_relationships
-- process_interaction, check_transmission_eligibility
+- get_relationships, get_status
+- chat_with_ai, check_transmissions
 
 ### 💻 Shell Integration(5ツール)
 - execute_command, analyze_file, write_file
-- read_project_file, list_files
-
-### 🔒 Remote Execution(4ツール)
-- remote_shell, ai_bot_status
-- isolated_python, isolated_analysis
+- list_files, run_scheduler
 
 ### ⚙️ System State(3ツール)
-- get_persona_state, get_fortune, run_maintenance
+- get_scheduler_status, run_maintenance, get_transmission_history
 
-### 🎴 ai.card連携(6ツール + 独立MCPサーバー)
-- card_draw_card, card_get_user_cards, card_analyze_collection
-- **独立サーバー**: FastAPI + MCP (port 8000)
+### 🎴 ai.card連携(3ツール)
+- get_user_cards, draw_card, get_draw_status
+- **統合ServiceClient**: 統一されたHTTP通信基盤
 
-### 📝 ai.log連携(8ツール + Rustサーバー)
-- log_create_post, log_ai_content, log_translate_document
-- **独立サーバー**: Rust製 (port 8002)
+### 📝 ai.log連携(新機能)
+- **統合ServiceClient**: ai.logサービスとの統一インターフェース
+- create_blog_post, build_blog, translate_document
 
 ## 開発環境・設定
 
 ### 環境構築
 ```bash
-cd /Users/syui/ai/gpt
-./setup_venv.sh
-source ~/.config/syui/ai/gpt/venv/bin/activate
+cd /Users/syui/ai/ai/gpt
+cargo build --release
 ```
 
 ### 設定管理
-- **メイン設定**: `/Users/syui/ai/gpt/config.json`
+- **メイン設定**: `/Users/syui/ai/ai/gpt/config.json.example`
 - **データディレクトリ**: `~/.config/syui/ai/gpt/`
-- **仮想環境**: `~/.config/syui/ai/gpt/venv/`
 
@@ -81,23 +76,36 @@
 aigpt server --port 8001
 
 # 記憶システム体験
 aigpt chat syui "質問内容" --provider ollama --model qwen3:latest
+
+# ドキュメント生成(ai.wiki統合)
+aigpt docs --wiki
 ```
 
 ## 技術アーキテクチャ
 
-### 統合構成
+### Rust実装の統合構成
 ```
-ai.gpt (統合MCPサーバー:8001)
-├── 🧠 ai.gpt core (記憶・関係性・人格)
-├── 💻 ai.shell (Claude Code風開発環境)
-├── 🎴 ai.card (独立MCPサーバー:8000)
-└── 📝 ai.log (Rust製ブログシステム:8002)
+ai.gpt (Rust製MCPサーバー:8001)
+├── 🧠 Memory & Persona System (Rust)
+├── 🤝 Relationship Management (Rust)
+├── 📊 Scheduler & Transmission (Rust)
+├── 💻 Shell Integration (Rust)
+├── 🔗 ServiceClient (統一HTTP基盤)
+│   ├── 🎴 ai.card (port 8000)
+│   ├── 📝 ai.log (port 8002)
+│   └── 🤖 ai.bot (port 8003)
+└── 📚 ai.wiki Generator (Rust)
 ```
 
+### 最新機能 (2024.06.09)
+- **MCP API共通化**: ServiceClient統一基盤
+- **ai.wiki統合**: 自動ドキュメント生成
+- **サービス設定統一**: 動的サービス登録
+- **完全Rust移行**: Python依存完全排除
+
 ### 今後の展開
 - **自律送信**: atproto実装による真の自発的コミュニケーション
 - **ai.ai連携**: 心理分析AIとの統合
-- **ai.verse統合**: UEメタバースとの連携
 - **分散SNS統合**: atproto完全対応
 
 ## 革新的な特徴
@@ -110,6 +118,10 @@
 - 現実の人間関係と同じ重みを持つAI関係性
 - 修復不可能な関係性破綻システム
 
-### 統合アーキテクチャ
-- fastapi_mcp基盤での複数AIシステム統合
-- OpenAI Function Calling + MCP完全連携実証済み
\ No newline at end of file
+### 統合ServiceClient
+- 複数AIサービスの統一インターフェース
+- DRY原則に基づく共通化実現
+- 設定ベースの柔軟なサービス管理
+
+## アーカイブ情報
+詳細な実装履歴・設計資料は `~/ai/ai/ai.wiki/gpt/` に移動済み
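The unified ServiceClient that the new README introduces is not itself shown in this diff. A hypothetical sketch of its shape, assuming the `reqwest` and `serde_json` crates and the ports from the architecture diagram; the real field and method names in ai.gpt may differ:

```rust
use std::collections::HashMap;

// One HTTP client shared across every backend service -- the "DRY"
// consolidation the README describes. Service names and ports follow
// the architecture diagram above.
struct ServiceClient {
    base_urls: HashMap<String, String>,
    http: reqwest::Client,
}

impl ServiceClient {
    fn new() -> Self {
        let mut base_urls = HashMap::new();
        base_urls.insert("ai.card".to_string(), "http://localhost:8000".to_string());
        base_urls.insert("ai.log".to_string(), "http://localhost:8002".to_string());
        base_urls.insert("ai.bot".to_string(), "http://localhost:8003".to_string());
        Self { base_urls, http: reqwest::Client::new() }
    }

    // A single generic GET path instead of per-service client code.
    async fn get_json(&self, service: &str, path: &str)
        -> Result<serde_json::Value, Box<dyn std::error::Error>> {
        let base = self.base_urls.get(service).ok_or("unknown service")?;
        Ok(self.http.get(format!("{base}{path}")).send().await?.json().await?)
    }
}
```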
diff --git a/TOML_MIGRATION.md b/TOML_MIGRATION.md
deleted file mode 100644
index 8defcd6..0000000
--- a/TOML_MIGRATION.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# TOML Configuration Migration Guide
-
-## Overview
-
-The ai.gpt Rust implementation (`aigpt-rs`) now uses TOML format for configuration instead of JSON. This provides better readability and is more idiomatic for Rust applications.
-
-## Configuration Location
-
-The configuration file is stored at:
-- **macOS**: `~/Library/Application Support/syui/ai/gpt/config.toml`
-- **Linux**: `~/.config/syui/ai/gpt/config.toml`
-- **Windows**: `%APPDATA%\syui\ai\gpt\config.toml`
-
-## Automatic Migration
-
-When you run the Rust implementation for the first time, it will automatically:
-
-1. Check if `config.toml` exists
-2. If not, look for `config.json` in various locations:
-   - `../config.json` (relative to aigpt-rs directory)
-   - `config.json` (current directory)
-   - `gpt/config.json` (from project root)
-   - `/Users/syui/ai/ai/gpt/config.json` (absolute path)
-3. If found, automatically convert the JSON to TOML format
-4. Save the converted configuration to the appropriate location
-
-## TOML Configuration Structure
-
-```toml
-# Default AI provider
-default_provider = "openai"
-
-# Provider configurations
-[providers.openai]
-default_model = "gpt-4o-mini"
-api_key = "your-api-key-here"  # Optional, can use OPENAI_API_KEY env var
-system_prompt = """
-Multi-line system prompt
-goes here
-"""
-
-[providers.ollama]
-default_model = "qwen3"
-host = "http://127.0.0.1:11434"
-
-# AT Protocol configuration (optional)
-[atproto]
-host = "https://bsky.social"
-handle = "your-handle.bsky.social"  # Optional
-password = "your-app-password"  # Optional
-
-# MCP (Model Context Protocol) configuration
-[mcp]
-enabled = true
-auto_detect = true
-
-# MCP Server definitions
-[mcp.servers.ai_gpt]
-base_url = "http://localhost:8001"
-name = "ai.gpt MCP Server"
-timeout = 10.0
-
-# MCP endpoints
-[mcp.servers.ai_gpt.endpoints]
-get_memories = "/get_memories"
-search_memories = "/search_memories"
-# ... other endpoints ...
-```
-
-## Manual Migration
-
-If automatic migration doesn't work, you can manually convert your `config.json`:
-
-1. Copy the example configuration from `gpt/config.toml.example`
-2. Fill in your specific values from `config.json`
-3. Save it to the configuration location mentioned above
-
-## Testing Configuration
-
-To test if your configuration is working:
-
-```bash
-cd gpt/aigpt-rs
-cargo run --bin test-config
-```
-
-This will show:
-- Loaded configuration values
-- Available providers
-- MCP and ATProto settings
-- Configuration file path
-
-## Differences from JSON
-
-Key differences in TOML format:
-- Multi-line strings use triple quotes (`"""`)
-- Comments start with `#`
-- Tables (objects) use `[table.name]` syntax
-- Arrays of tables use `[[array.name]]` syntax
-- More readable for configuration files
-
-## Backward Compatibility
-
-The Python implementation still uses JSON format. Both implementations can coexist:
-- Python: Uses `config.json`
-- Rust: Uses `config.toml` (with automatic migration from JSON)
-
-The Rust implementation will only perform the migration once. After `config.toml` is created, it will use that file exclusively.
\ No newline at end of file
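The deleted TOML_MIGRATION.md above describes the automatic JSON-to-TOML conversion only in prose. A minimal sketch of that flow under stated assumptions (the `serde_json` and `toml` crates; the actual aigpt-rs implementation is not shown in this document):

```rust
use std::{error::Error, fs, path::Path};

// Mirror of the migration steps above: keep config.toml if it exists,
// otherwise convert the first config.json found among the candidates.
fn ensure_toml_config(toml_path: &Path, json_candidates: &[&Path]) -> Result<(), Box<dyn Error>> {
    if toml_path.exists() {
        return Ok(()); // migration already done; config.toml is used exclusively
    }
    for candidate in json_candidates {
        if candidate.exists() {
            let json: serde_json::Value =
                serde_json::from_str(&fs::read_to_string(candidate)?)?;
            // serde_json::Value serializes to TOML as long as it contains no nulls.
            fs::write(toml_path, toml::to_string_pretty(&json)?)?;
            break;
        }
    }
    Ok(())
}
```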
diff --git a/aishell.md b/aishell.md
deleted file mode 100644
index 136f61d..0000000
--- a/aishell.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# ai.shell プロジェクト仕様書
-
-## 概要
-ai.shellは、AIを活用したインタラクティブなシェル環境です。Claude Codeのような体験を提供し、プロジェクトの目標と仕様をAIが理解して、開発を支援します。
-
-## 主要機能
-
-### 1. インタラクティブシェル
-- AIとの対話型インターフェース
-- シェルコマンドの実行(!command形式)
-- 高度な補完機能
-- コマンド履歴
-
-### 2. AI支援機能
-- **analyze <file>**: ファイルの分析
-- **generate <description>**: コード生成
-- **explain <topic>**: 概念の説明
-- **load**: プロジェクト仕様(このファイル)の読み込み
-
-### 3. ai.gpt統合
-- 関係性ベースのAI人格
-- 記憶システム
-- 運勢システムによる応答の変化
-
-## 使用方法
-
-```bash
-# ai.shellを起動
-aigpt shell
-
-# プロジェクト仕様を読み込み
-ai.shell> load
-
-# ファイルを分析
-ai.shell> analyze src/main.py
-
-# コードを生成
-ai.shell> generate Python function to calculate fibonacci
-
-# シェルコマンドを実行
-ai.shell> !ls -la
-
-# AIと対話
-ai.shell> How can I improve this code?
-```
-
-## 技術スタック
-- Python 3.10+
-- prompt-toolkit(補完機能)
-- fastapi-mcp(MCP統合)
-- ai.gpt(人格・記憶システム)
-
-## 開発目標
-1. Claude Codeのような自然な開発体験
-2. AIがプロジェクトコンテキストを理解
-3. シェルコマンドとAIのシームレスな統合
-4. 開発者の生産性向上
-
-## 今後の展開
-- ai.cardとの統合(カードゲームMCPサーバー)
-- より高度なプロジェクト理解機能
-- 自動コード修正・リファクタリング
-- テスト生成・実行
\ No newline at end of file
diff --git a/card b/card
deleted file mode 160000
index e7948bf..0000000
--- a/card
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit e7948bf4cf31a9c3f02c86d1e2c548deb21523a8
diff --git a/config.json b/config.json.example
similarity index 86%
rename from config.json
rename to config.json.example
index f697ba1..59ff9a4 100644
--- a/config.json
+++ b/config.json.example
@@ -20,7 +20,7 @@
   "mcp": {
     "servers": {
       "ai_gpt": {
-        "base_url": "http://localhost:8001",
+        "base_url": "http://localhost:8080",
         "name": "ai.gpt MCP Server",
         "timeout": "10.0",
         "endpoints": {
@@ -42,19 +42,14 @@
           "card_get_card_details": "/card_get_card_details",
           "card_analyze_collection": "/card_analyze_collection",
           "card_get_gacha_stats": "/card_get_gacha_stats",
-          "card_system_status": "/card_system_status",
-          "log_create_post": "/log_create_post",
-          "log_list_posts": "/log_list_posts",
-          "log_build_blog": "/log_build_blog",
-          "log_get_post": "/log_get_post",
-          "log_system_status": "/log_system_status",
-          "log_ai_content": "/log_ai_content",
-          "log_translate_document": "/log_translate_document",
-          "log_generate_docs": "/log_generate_docs"
+          "card_system_status": "/card_system_status"
         }
      }
    },
    "enabled": "true",
    "auto_detect": "true"
+  },
+  "docs": {
+    "ai_root": "~/ai/ai"
+  }
 }
diff --git a/config.toml.example b/config.toml.example
deleted file mode 100644
index 4b02d20..0000000
--- a/config.toml.example
+++ /dev/null
@@ -1,64 +0,0 @@
-# AI.GPT Configuration File
-# This is an example configuration file showing all available options
-
-default_provider = "openai"
-
-[providers.openai]
-api_key = ""
-default_model = "gpt-4o-mini"
-system_prompt = """
-あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。
-
-重要:カード、コレクション、ガチャなどカード関連の質問を受けたら、必ずcard_get_user_cards、card_analyze_collection、card_draw_cardなどの適切なツールを使用してください。didパラメータには会話相手のユーザーID(例:'syui')を使用してください。
-
-ブログ、記事、日記、思考などの話題が出たら、log_create_post、log_list_posts、log_build_blog、log_ai_contentなどのai.logツールを使用してください。AI記憶システムと連携して、思い出や学習内容をブログ記事として自動生成できます。
-
-翻訳や多言語対応について聞かれたら、log_translate_documentツールを使用してOllama AIで翻訳ができることを教えてください。日本語から英語、英語から日本語などの翻訳が可能で、マークダウン構造も保持します。ドキュメント生成についてはlog_generate_docsツールでREADME、API、構造、変更履歴の自動生成ができます。
-"""
-
-[providers.ollama]
-host = "http://127.0.0.1:11434"
-default_model = "qwen3"
-
-[atproto]
-host = "https://bsky.social"
-# handle = "your-handle.bsky.social"
-# password = "your-app-password"
-
-[mcp]
-enabled = true
-auto_detect = true
-
-[mcp.servers.ai_gpt]
-base_url = "http://localhost:8001"
-name = "ai.gpt MCP Server"
-timeout = 10.0
-
-[mcp.servers.ai_gpt.endpoints]
-get_memories = "/get_memories"
-search_memories = "/search_memories"
-get_contextual_memories = "/get_contextual_memories"
-get_relationship = "/get_relationship"
-process_interaction = "/process_interaction"
-get_all_relationships = "/get_all_relationships"
-get_persona_state = "/get_persona_state"
-get_fortune = "/get_fortune"
-run_maintenance = "/run_maintenance"
-execute_command = "/execute_command"
-analyze_file = "/analyze_file"
-remote_shell = "/remote_shell"
-ai_bot_status = "/ai_bot_status"
-card_get_user_cards = "/card_get_user_cards"
-card_draw_card = "/card_draw_card"
-card_get_card_details = "/card_get_card_details"
-card_analyze_collection = "/card_analyze_collection"
"/card_get_gacha_stats" -card_system_status = "/card_system_status" -log_create_post = "/log_create_post" -log_list_posts = "/log_list_posts" -log_build_blog = "/log_build_blog" -log_get_post = "/log_get_post" -log_system_status = "/log_system_status" -log_ai_content = "/log_ai_content" -log_translate_document = "/log_translate_document" -log_generate_docs = "/log_generate_docs" \ No newline at end of file diff --git a/docs/AI_CARD_INTEGRATION.md b/docs/AI_CARD_INTEGRATION.md deleted file mode 100644 index 88864a0..0000000 --- a/docs/AI_CARD_INTEGRATION.md +++ /dev/null @@ -1,172 +0,0 @@ -# ai.card と ai.gpt の統合ガイド - -## 概要 - -ai.gptのMCPサーバーにai.cardのツールを統合し、AIがカードゲームシステムとやり取りできるようになりました。 - -## セットアップ - -### 1. 必要な環境 - -- Python 3.13 -- ai.gpt プロジェクト -- ai.card プロジェクト(`./card` ディレクトリ) - -### 2. 起動手順 - -**ステップ1: ai.cardサーバーを起動**(ターミナル1) -```bash -cd card -./start_server.sh -``` - -**ステップ2: ai.gpt MCPサーバーを起動**(ターミナル2) -```bash -aigpt server -``` - -起動時に以下が表示されることを確認: -- 🎴 Card Game System: 6 tools -- 🎴 ai.card: ./card directory detected - -**ステップ3: AIと対話**(ターミナル3) -```bash -aigpt conv syui --provider openai -``` - -## 使用可能なコマンド - -### カード関連の質問例 - -``` -# カードコレクションを表示 -「カードコレクションを見せて」 -「私のカードを見せて」 -「カード一覧を表示して」 - -# ガチャを実行 -「ガチャを引いて」 -「カードを引きたい」 - -# コレクション分析 -「私のコレクションを分析して」 - -# ガチャ統計 -「ガチャの統計を見せて」 -``` - -## 技術仕様 - -### MCP ツール一覧 - -| ツール名 | 説明 | パラメータ | -|---------|------|-----------| -| `card_get_user_cards` | ユーザーのカード一覧取得 | did, limit | -| `card_draw_card` | ガチャでカード取得 | did, is_paid | -| `card_get_card_details` | カード詳細情報取得 | card_id | -| `card_analyze_collection` | コレクション分析 | did | -| `card_get_gacha_stats` | ガチャ統計取得 | なし | -| `card_system_status` | システム状態確認 | なし | - -### 動作の流れ - -1. **ユーザーがカード関連の質問をする** - - AIがキーワード(カード、コレクション、ガチャなど)を検出 - -2. **AIが適切なMCPツールを呼び出す** - - OpenAIのFunction Callingを使用 - - didパラメータには会話相手のユーザーID(例:'syui')を使用 - -3. **ai.gpt MCPサーバーがai.cardサーバーに転送** - - http://localhost:8001 → http://localhost:8000 - - 適切なエンドポイントにリクエストを転送 - -4. **結果をAIが解釈して返答** - - カード情報を分かりやすく説明 - - エラー時は適切なガイダンスを提供 - -## 設定 - -### config.json - -```json -{ - "providers": { - "openai": { - "api_key": "your-api-key", - "default_model": "gpt-4o-mini", - "system_prompt": "カード関連の質問では、必ずcard_get_user_cardsなどのツールを使用してください。" - } - }, - "mcp": { - "servers": { - "ai_gpt": { - "endpoints": { - "card_get_user_cards": "/card_get_user_cards", - "card_draw_card": "/card_draw_card", - "card_get_card_details": "/card_get_card_details", - "card_analyze_collection": "/card_analyze_collection", - "card_get_gacha_stats": "/card_get_gacha_stats", - "card_system_status": "/card_system_status" - } - } - } - } -} -``` - -## トラブルシューティング - -### エラー: "ai.card server is not running" - -ai.cardサーバーが起動していません。以下を実行: -```bash -cd card -./start_server.sh -``` - -### エラー: "カード一覧の取得に失敗しました" - -1. ai.cardサーバーが正常に起動しているか確認 -2. aigpt serverを再起動 -3. ポート8000と8001が使用可能か確認 - -### プロセスの終了方法 - -```bash -# ポート8001のプロセスを終了 -lsof -ti:8001 | xargs kill -9 - -# ポート8000のプロセスを終了 -lsof -ti:8000 | xargs kill -9 -``` - -## 実装の詳細 - -### 主な変更点 - -1. **ai.gpt MCPサーバーの拡張** (`src/aigpt/mcp_server.py`) - - `./card`ディレクトリの存在を検出 - - ai.card用のMCPツールを自動登録 - -2. **AIプロバイダーの更新** (`src/aigpt/ai_provider.py`) - - card_*ツールの定義追加 - - ツール実行時のパラメータ処理 - -3. 
-3. **MCPクライアントの拡張** (`src/aigpt/cli.py`)
-   - `has_card_tools`プロパティ追加
-   - ai.card MCPメソッドの実装
-
-## 今後の拡張案
-
-- [ ] カードバトル機能の追加
-- [ ] カードトレード機能
-- [ ] レアリティ別の表示
-- [ ] カード画像の表示対応
-- [ ] atproto連携の実装
-
-## 関連ドキュメント
-
-- [ai.card 開発ガイド](./card/claude.md)
-- [エコシステム統合設計書](./CLAUDE.md)
-- [ai.gpt README](./README.md)
\ No newline at end of file
diff --git a/docs/FIXED_MCP_TOOLS.md b/docs/FIXED_MCP_TOOLS.md
deleted file mode 100644
index 81b3509..0000000
--- a/docs/FIXED_MCP_TOOLS.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# Fixed MCP Tools Issue
-
-## Summary
-
-The issue where AI wasn't calling card tools has been fixed. The problem was:
-
-1. The `chat` command wasn't creating an MCP client when using OpenAI
-2. The system prompt in `build_context_prompt` didn't mention available tools
-
-## Changes Made
-
-### 1. Updated `/Users/syui/ai/gpt/src/aigpt/cli.py` (chat command)
-
-Added MCP client creation for OpenAI provider:
-
-```python
-# Get config instance
-config_instance = Config()
-
-# Get defaults from config if not provided
-if not provider:
-    provider = config_instance.get("default_provider", "ollama")
-if not model:
-    if provider == "ollama":
-        model = config_instance.get("providers.ollama.default_model", "qwen2.5")
-    else:
-        model = config_instance.get("providers.openai.default_model", "gpt-4o-mini")
-
-# Create AI provider with MCP client if needed
-ai_provider = None
-mcp_client = None
-
-try:
-    # Create MCP client for OpenAI provider
-    if provider == "openai":
-        mcp_client = MCPClient(config_instance)
-        if mcp_client.available:
-            console.print(f"[dim]MCP client connected to {mcp_client.active_server}[/dim]")
-
-    ai_provider = create_ai_provider(provider=provider, model=model, mcp_client=mcp_client)
-    console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
-except Exception as e:
-    console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
-    console.print("[yellow]Falling back to simple responses[/yellow]\n")
-```
-
-### 2. Updated `/Users/syui/ai/gpt/src/aigpt/persona.py` (build_context_prompt method)
-
-Added tool instructions to the system prompt:
-
-```python
-context_prompt += f"""IMPORTANT: You have access to the following tools:
-- Memory tools: get_memories, search_memories, get_contextual_memories
-- Relationship tools: get_relationship
-- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection
-
-When asked about cards, collections, or anything card-related, YOU MUST use the card tools.
-For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'.
-
-Respond to this message while staying true to your personality and the established relationship context:
-
-User: {current_message}
-
-AI:"""
-```
-
-## Test Results
-
-After the fix:
-
-```bash
-$ aigpt chat syui "カードコレクションを見せて"
-
-🔍 [MCP Client] Checking availability...
-✅ [MCP Client] ai_gpt server connected successfully
-✅ [MCP Client] ai.card tools detected and available
-MCP client connected to ai_gpt
-Using openai with model gpt-4o-mini
-
-🔧 [OpenAI] 1 tools called:
-  - card_get_user_cards({"did":"syui"})
-🌐 [MCP] Executing card_get_user_cards...
-✅ [MCP] Result: {'error': 'カード一覧の取得に失敗しました'}...
-```
-
-The AI is now correctly calling the `card_get_user_cards` tool! The error is expected because the ai.card server needs to be running on port 8000.
-
-## How to Use
-
-1. Start the MCP server:
-   ```bash
-   aigpt server --port 8001
-   ```
-
-2. (Optional) Start the ai.card server:
-   ```bash
-   cd card && ./start_server.sh
-   ```
-
-3. Use the chat command with OpenAI:
-   ```bash
-   aigpt chat syui "カードコレクションを見せて"
-   ```
-
-The AI will now automatically use the card tools when asked about cards!
-
-## Test Script
-
-A test script `/Users/syui/ai/gpt/test_openai_tools.py` is available to test OpenAI API tool calls directly.
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
deleted file mode 100644
index 1a3ccec..0000000
--- a/docs/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# ai.gpt ドキュメント
-
-ai.gptは、記憶と関係性に基づいて自律的に動作するAIシステムです。
-
-## 目次
-
-- [クイックスタート](quickstart.md)
-- [基本概念](concepts.md)
-- [コマンドリファレンス](commands.md)
-- [設定ガイド](configuration.md)
-- [スケジューラー](scheduler.md)
-- [MCP Server](mcp-server.md)
-- [開発者向け](development.md)
-
-## 特徴
-
-- 🧠 **階層的記憶システム**: 完全ログ→要約→コア記憶→忘却
-- 💔 **不可逆的な関係性**: 現実の人間関係のように修復不可能
-- 🎲 **AI運勢システム**: 日々変化する人格
-- 🤖 **自律送信**: 関係性が深まると自発的にメッセージ
-- 🔗 **MCP対応**: AIツールとして記憶を提供
-
-## システム要件
-
-- Python 3.10以上
-- オプション: Ollama または OpenAI API
-
-## ライセンス
-
-MIT License
\ No newline at end of file
diff --git a/docs/ai_card_mcp_integration_summary.md b/docs/ai_card_mcp_integration_summary.md
deleted file mode 100644
index 0973048..0000000
--- a/docs/ai_card_mcp_integration_summary.md
+++ /dev/null
@@ -1,244 +0,0 @@
-# ai.card MCP統合作業完了報告 (2025/01/06)
-
-## 作業概要
-ai.cardプロジェクトに独立したMCPサーバー実装を追加し、fastapi_mcpベースでカードゲーム機能をMCPツールとして公開。
-
-## 実装完了機能
-
-### 1. MCP依存関係追加
-**場所**: `card/api/requirements.txt`
-
-**追加項目**:
-```txt
-fastapi-mcp==0.1.0
-```
-
-### 2. ai.card MCPサーバー実装
-**場所**: `card/api/app/mcp_server.py`
-
-**機能**:
-- FastAPI + fastapi_mcp統合
-- 独立したMCPサーバークラス `AICardMcpServer`
-- 環境変数による有効/無効切り替え
-
-**公開MCPツール (9個)**:
-
-**カード管理系 (5個)**:
-- `get_user_cards` - ユーザーのカード一覧取得
-- `draw_card` - ガチャでカード取得
-- `get_card_details` - カード詳細情報取得
-- `analyze_card_collection` - コレクション分析
-- `get_unique_registry` - ユニークカード登録状況
-
-**システム系 (3個)**:
-- `sync_cards_atproto` - atproto同期
-- `get_gacha_stats` - ガチャシステム統計
-- 既存のFastAPI REST API(/api/v1/*)
-
-**atproto連携系 (1個)**:
-- `sync_cards_atproto` - カードデータのatproto PDS同期
-
-### 3. メインアプリ統合
-**場所**: `card/api/app/main.py`
-
-**変更内容**:
-```python
-# MCP統合
-from app.mcp_server import AICardMcpServer
-
-enable_mcp = os.getenv("ENABLE_MCP", "true").lower() == "true"
-mcp_server = AICardMcpServer(enable_mcp=enable_mcp)
-app = mcp_server.get_app()
-```
-
-**動作確認**:
-- `ENABLE_MCP=true` (デフォルト): MCPサーバー有効
-- `ENABLE_MCP=false`: 通常のFastAPIのみ
-
-## 技術実装詳細
-
-### アーキテクチャ設計
-```
-ai.card/
-├── api/app/main.py          # FastAPIアプリ + MCP統合
-├── api/app/mcp_server.py    # 独立MCPサーバー
-├── api/app/routes/          # REST API (既存)
-├── api/app/services/        # ビジネスロジック (既存)
-├── api/app/repositories/    # データアクセス (既存)
-└── api/requirements.txt     # fastapi-mcp追加
-```
-
-### MCPツール実装パターン
-```python
-@self.app.get("/tool_name", operation_id="tool_name")
-async def tool_name(
-    param: str,
-    session: AsyncSession = Depends(get_session)
-) -> Dict[str, Any]:
-    """Tool description"""
-    try:
-        # ビジネスロジック実行
-        result = await service.method(param)
-        return {"success": True, "data": result}
-    except Exception as e:
-        logger.error(f"Error: {e}")
-        return {"error": str(e)}
-```
-
-### 既存システムとの統合
-- **REST API**: 既存の `/api/v1/*` エンドポイント保持
-- **データアクセス**: 既存のRepository/Serviceパターン再利用
-- **認証**: 既存のDID認証システム利用
-- **データベース**: 既存のPostgreSQL + SQLAlchemy
-
-## 起動方法
-
-### 1. 環境セットアップ
-```bash
-cd /Users/syui/ai/gpt/card/api
-
-# 仮想環境作成 (推奨)
-python -m venv ~/.config/syui/ai/card/venv
-source ~/.config/syui/ai/card/venv/bin/activate
-
-# 依存関係インストール
-pip install -r requirements.txt
-```
-
-### 2. サーバー起動
-```bash
-# MCP有効 (デフォルト)
-python -m app.main
-
-# または
-ENABLE_MCP=true uvicorn app.main:app --host 0.0.0.0 --port 8000
-
-# MCP無効
-ENABLE_MCP=false uvicorn app.main:app --host 0.0.0.0 --port 8000
-```
-
-### 3. 動作確認
-```bash
-# ヘルスチェック
-curl http://localhost:8000/health
-
-# MCP有効時の応答例
-{
-  "status": "healthy",
-  "mcp_enabled": true,
-  "mcp_endpoint": "/mcp"
-}
-
-# API仕様確認
-curl http://localhost:8000/docs
-```
-
-## MCPクライアント連携
-
-### ai.gptからの接続
-```python
-# ai.gptのcard_integration.pyで使用
-api_base_url = "http://localhost:8000"
-
-# MCPツール経由でアクセス
-response = await client.get(f"{api_base_url}/get_user_cards?did=did:plc:...")
-```
-
-### Claude Desktop等での利用
-```json
-{
-  "mcpServers": {
-    "aicard": {
-      "command": "uvicorn",
-      "args": ["app.main:app", "--host", "localhost", "--port", "8000"],
-      "cwd": "/Users/syui/ai/gpt/card/api"
-    }
-  }
-}
-```
-
-## 既知の制約と注意点
-
-### 1. 依存関係
-- **fastapi-mcp**: 現在のバージョンは0.1.0(初期実装)
-- **Python環境**: システム環境では外部管理エラーが発生
-- **推奨**: 仮想環境での実行
-
-### 2. データベース要件
-- PostgreSQL稼働が必要
-- SQLite fallback対応済み(開発用)
-- atproto同期は外部API依存
-
-### 3. MCP無効化時の動作
-- `ENABLE_MCP=false`時は通常のFastAPI
-- 既存のREST API (`/api/v1/*`) は常時利用可能
-- iOS/Webアプリは影響なし
-
-## ai.gptとの統合戦略
-
-### 現在の状況
-- **ai.gpt**: 統合MCPサーバー(ai.gpt + ai.shell + ai.card proxy)
-- **ai.card**: 独立MCPサーバー(カードロジック本体)
-
-### 推奨連携パターン
-```
-Claude Desktop/Cursor
-    ↓
-ai.gpt MCP (port 8001) ←-- ai.shell tools
-    ↓ HTTP client
-ai.card MCP (port 8000) ←-- card business logic
-    ↓
-PostgreSQL/atproto PDS
-```
-
-### 重複削除対象
-ai.gptプロジェクトから以下を削除可能:
-- `src/aigpt/card_integration.py` (HTTPクライアント)
-- `./card/` (submodule)
-- MCPサーバーの `--enable-card` オプション
-
-## 次回開発時の推奨手順
-
-### 1. 環境確認
-```bash
-cd /Users/syui/ai/gpt/card/api
-source ~/.config/syui/ai/card/venv/bin/activate
-python -c "from app.mcp_server import AICardMcpServer; print('✓ Import OK')"
-```
-
-### 2. サーバー起動テスト
-```bash
-# MCP有効でサーバー起動
-uvicorn app.main:app --host localhost --port 8000 --reload
-
-# 別ターミナルで動作確認
-curl http://localhost:8000/health
-curl "http://localhost:8000/get_gacha_stats"
-```
-
-### 3. ai.gptとの統合確認
-```bash
-# ai.gptサーバー起動
-cd /Users/syui/ai/gpt
-aigpt server --port 8001
-
-# ai.cardサーバー起動
-cd /Users/syui/ai/gpt/card/api
-uvicorn app.main:app --port 8000
-
-# 連携テスト(ai.gpt → ai.card)
-curl "http://localhost:8001/get_user_cards?did=did:plc:example"
-```
-
-## 成果サマリー
-
-**実装済み**: ai.card独立MCPサーバー
-**技術的成果**: fastapi_mcp統合、9個のMCPツール公開
-**アーキテクチャ**: 疎結合設計、既存システム保持
-**拡張性**: 環境変数によるMCP有効/無効切り替え
-
-**統合効果**:
-- ai.cardが独立したMCPサーバーとして動作
-- ai.gptとの重複MCPコード解消
-- カードビジネスロジックの責任分離維持
-- 将来的なマイクロサービス化への対応
\ No newline at end of file
diff --git a/docs/ai_shell_integration_summary.md b/docs/ai_shell_integration_summary.md
deleted file mode 100644
index 1e88c69..0000000
--- a/docs/ai_shell_integration_summary.md
+++ /dev/null
@@ -1,218 +0,0 @@
-# ai.shell統合作業完了報告 (2025/01/06)
-
-## 作業概要
-ai.shellのRust実装をai.gptのPython実装に統合し、Claude Code風のインタラクティブシェル環境を実現。
-
-## 実装完了機能
-
-### 1. aigpt shellコマンド
-**場所**: `src/aigpt/cli.py` - `shell()` 関数
-
-**機能**:
-```bash
-aigpt shell  # インタラクティブシェル起動
-```
-
-**シェル内コマンド**:
-- `help` - コマンド一覧表示
-- `!<command>` - シェルコマンド実行(例: `!ls`, `!pwd`)
-- `analyze <file>` - ファイルをAIで分析
-- `generate <description>` - コード生成
-- `explain <topic>` - 概念説明
-- `load` - aishell.md読み込み
-- `status`, `fortune`, `relationships` - AI状態確認
-- `clear` - 画面クリア
-- `exit`/`quit` - 終了
-- その他のメッセージ - AIとの直接対話
-
-**実装の特徴**:
-- prompt-toolkit使用(補完・履歴機能)
-- ただしターミナル環境依存の問題あり(後で修正必要)
-- 現在は`input()`ベースでも動作
-
-### 2. MCPサーバー統合
-**場所**: `src/aigpt/mcp_server.py`
-
-**FastApiMCP実装パターン**:
-```python
-# FastAPIアプリ作成
-self.app = FastAPI(title="AI.GPT Memory and Relationship System")
-
-# FastApiMCPサーバー作成
-self.server = FastApiMCP(self.app)
-
-# エンドポイント登録
-@self.app.get("/get_memories", operation_id="get_memories")
-async def get_memories(limit: int = 10):
-    # ...
-
-# MCPマウント
-self.server.mount()
-```
-
-**公開ツール (14個)**:
-
-**ai.gpt系 (9個)**:
-- `get_memories` - アクティブメモリ取得
-- `get_relationship` - 特定ユーザーとの関係取得
-- `get_all_relationships` - 全関係取得
-- `get_persona_state` - 人格状態取得
-- `process_interaction` - ユーザー対話処理
-- `check_transmission_eligibility` - 送信可能性チェック
-- `get_fortune` - AI運勢取得
-- `summarize_memories` - メモリ要約作成
-- `run_maintenance` - 日次メンテナンス実行
-
-**ai.shell系 (5個)**:
-- `execute_command` - シェルコマンド実行
-- `analyze_file` - ファイルAI分析
-- `write_file` - ファイル書き込み(バックアップ付き)
-- `read_project_file` - aishell.md等の読み込み
-- `list_files` - ディレクトリファイル一覧
-
-### 3. ai.card統合対応
-**場所**: `src/aigpt/card_integration.py`
-
-**サーバー起動オプション**:
-```bash
-aigpt server --enable-card  # ai.card機能有効化
-```
-
-**ai.card系ツール (5個)**:
-- `get_user_cards` - ユーザーカード取得
-- `draw_card` - ガチャでカード取得
-- `get_card_details` - カード詳細情報
-- `sync_cards_atproto` - atproto同期
-- `analyze_card_collection` - コレクション分析
-
-### 4. プロジェクト仕様書
-**場所**: `aishell.md`
-
-Claude.md的な役割で、プロジェクトの目標と仕様を記述。`load`コマンドでAIが読み取り可能。
-
-## 技術実装詳細
-
-### ディレクトリ構造
-```
-src/aigpt/
-├── cli.py              # shell関数追加
-├── mcp_server.py       # FastApiMCP実装
-├── card_integration.py # ai.card統合
-└── ...                 # 既存ファイル
-```
-
-### 依存関係追加
-`pyproject.toml`:
-```toml
-dependencies = [
-    # ... 既存
-    "prompt-toolkit>=3.0.0",  # 追加
-]
-```
-
-### 名前規則の統一
-- MCP server名: `aigpt` (ai-gptから変更)
-- パッケージ名: `aigpt`
-- コマンド名: `aigpt shell`
-
-## 動作確認済み
-
-### CLI動作確認
-```bash
-# 基本機能
-aigpt shell
-# シェル内で
-ai.shell> help
-ai.shell> !ls
-ai.shell> analyze README.md  # ※AI provider要設定
-ai.shell> load
-ai.shell> exit
-
-# MCPサーバー
-aigpt server --model qwen2.5-coder:7b --port 8001
-# -> http://localhost:8001/docs でAPI確認可能
-# -> /mcp エンドポイントでMCP接続可能
-```
-
-### エラー対応済み
-1. **Pydantic日付型エラー**: `models.py`で`datetime.date`インポート追加
-2. **FastApiMCP使用法**: サンプルコードに基づき正しい実装パターンに修正
-3. **prompt関数名衝突**: `prompt_toolkit.prompt`を`ptk_prompt`にリネーム
-
-## 既知の課題と今後の改善点
-
-### 1. prompt-toolkit環境依存問題
-**症状**: ターミナル環境でない場合にエラー
-**対処法**: 環境検出して`input()`にフォールバック
-**場所**: `src/aigpt/cli.py` - `shell()` 関数
-
-### 2. AI provider設定
-**現状**: ollamaのqwen2.5モデルが必要
-**対処法**:
-```bash
-ollama pull qwen2.5
-# または
-aigpt shell --model qwen2.5-coder:7b
-```
-
-### 3. atproto実装
-**現状**: ai.cardのatproto機能は未実装
-**今後**: 実際のatproto API連携実装
-
-## 次回開発時の推奨アプローチ
-
-### 1. このドキュメントの活用
-```bash
-# このファイルを読み込み
-cat docs/ai_shell_integration_summary.md
-```
-
-### 2. 環境セットアップ
-```bash
-cd /Users/syui/ai/gpt
-python -m venv venv
-source venv/bin/activate
-pip install -e .
-```
-
-### 3. 動作確認
-```bash
-# shell機能
-aigpt shell
-
-# MCP server
-aigpt server --model qwen2.5-coder:7b
-```
-
-### 4. 主要設定ファイル確認場所
-- CLI実装: `src/aigpt/cli.py`
-- MCP実装: `src/aigpt/mcp_server.py`
-- 依存関係: `pyproject.toml`
-- プロジェクト仕様: `aishell.md`
-
-## アーキテクチャ設計思想
-
-### yui system適用
-- **唯一性**: 各ユーザーとの関係は1:1
-- **不可逆性**: 関係性破壊は修復不可能
-- **現実反映**: ゲーム→現実の循環的影響
-
-### fastapi_mcp統一基盤
-- 各AI(gpt, shell, card)を統合MCPサーバーで公開
-- FastAPIエンドポイント → MCPツール自動変換
-- Claude Desktop, Cursor等から利用可能
-
-### 段階的実装完了
-1. ✅ ai.shell基本機能 → Python CLI
-2. ✅ MCP統合 → 外部AI連携
-3. 🔧 prompt-toolkit最適化 → 環境対応
-4. 🔧 atproto実装 → 本格的SNS連携
-
-## 成果サマリー
-
-**実装済み**: Claude Code風の開発環境
-**技術的成果**: Rust→Python移行、MCP統合、ai.card対応
-**哲学的一貫性**: yui systemとの整合性維持
-**利用可能性**: 即座に`aigpt shell`で体験可能
-
-この統合により、ai.gptは単なる会話AIから、開発支援を含む総合的なAI環境に進化しました。
\ No newline at end of file
diff --git a/docs/commands.md b/docs/commands.md
deleted file mode 100644
index 0437596..0000000
--- a/docs/commands.md
+++ /dev/null
@@ -1,207 +0,0 @@
-# コマンドリファレンス
-
-## chat - AIと会話
-
-ユーザーとAIの対話を処理し、関係性を更新します。
-
-```bash
-ai-gpt chat USER_ID MESSAGE [OPTIONS]
-```
-
-### 引数
-- `USER_ID`: ユーザーID(atproto DID形式)
-- `MESSAGE`: 送信するメッセージ
-
-### オプション
-- `--provider`: AIプロバイダー(ollama/openai)
-- `--model`, `-m`: 使用するモデル
-- `--data-dir`, `-d`: データディレクトリ
-
-### 例
-```bash
-# 基本的な会話
-ai-gpt chat "did:plc:user123" "こんにちは"
-
-# OpenAIを使用
-ai-gpt chat "did:plc:user123" "調子はどう?" --provider openai --model gpt-4o-mini
-
-# Ollamaでカスタムモデル
-ai-gpt chat "did:plc:user123" "今日の天気は?" --provider ollama --model llama2
-```
-
-## status - 状態確認
-
-AIの状態や特定ユーザーとの関係を表示します。
-
-```bash
-ai-gpt status [USER_ID] [OPTIONS]
-```
-
-### 引数
-- `USER_ID`: (オプション)特定ユーザーとの関係を確認
-
-### 例
-```bash
-# AI全体の状態
-ai-gpt status
-
-# 特定ユーザーとの関係
-ai-gpt status "did:plc:user123"
-```
-
-## fortune - 今日の運勢
-
-AIの今日の運勢を確認します。
-
-```bash
-ai-gpt fortune [OPTIONS]
-```
-
-### 表示内容
-- 運勢値(1-10)
-- 連続した幸運/不運の日数
-- ブレークスルー状態
-
-## relationships - 関係一覧
-
-すべてのユーザーとの関係を一覧表示します。
-
-```bash
-ai-gpt relationships [OPTIONS]
-```
-
-### 表示内容
-- ユーザーID
-- 関係性ステータス
-- スコア
-- 送信可否
-- 最終対話日
-
-## transmit - 送信実行
-
-送信可能なユーザーへのメッセージを確認・実行します。
-
-```bash
-ai-gpt transmit [OPTIONS]
-```
-
-### オプション
-- `--dry-run/--execute`: ドライラン(デフォルト)または実行
-- `--data-dir`, `-d`: データディレクトリ
-
-### 例
-```bash
-# 送信内容を確認(ドライラン)
-ai-gpt transmit
-
-# 実際に送信を実行
-ai-gpt transmit --execute
-```
-
-## maintenance - メンテナンス
-
-日次メンテナンスタスクを実行します。
-
-```bash
-ai-gpt maintenance [OPTIONS]
-```
-
-### 実行内容
-- 関係性の時間減衰
-- 記憶の忘却処理
-- コア記憶の判定
-- 記憶の要約作成
-
-## config - 設定管理
-
-設定の確認・変更を行います。
-
-```bash
-ai-gpt config ACTION [KEY] [VALUE]
-```
-
-### アクション
-- `get`: 設定値を取得
-- `set`: 設定値を変更
-- `delete`: 設定を削除
-- `list`: 設定一覧を表示
-
-### 例
-```bash
-# APIキーを設定
-ai-gpt config set providers.openai.api_key sk-xxxxx
-
-# 設定を確認
-ai-gpt config get providers.openai.api_key
-
-# 設定一覧
-ai-gpt config list
-
-# プロバイダー設定のみ表示
-ai-gpt config list providers
-```
-
-## schedule - スケジュール管理
-
-定期実行タスクを管理します。
-
-```bash
-ai-gpt schedule ACTION [TASK_TYPE] [SCHEDULE] [OPTIONS]
-```
-
-### アクション
-- `add`: タスクを追加
-- `list`: タスク一覧
-- `enable`: タスクを有効化
-- `disable`: タスクを無効化
-- `remove`: タスクを削除
-- `run`: スケジューラーを起動
-
-### タスクタイプ
-- `transmission_check`: 送信チェック
-- `maintenance`: 日次メンテナンス
-- `fortune_update`: 運勢更新
-- `relationship_decay`: 関係性減衰
-- `memory_summary`: 記憶要約
-
-### スケジュール形式
-- **Cron形式**: `"0 */6 * * *"` (6時間ごと)
-- **インターバル**: `"30m"`, `"2h"`, `"1d"`
-
-### 例
-```bash
-# 30分ごとに送信チェック
-ai-gpt schedule add transmission_check "30m"
-
-# 毎日午前3時にメンテナンス
-ai-gpt schedule add maintenance "0 3 * * *"
-
-# タスク一覧
-ai-gpt schedule list
-
-# スケジューラーを起動
-ai-gpt schedule run
-```
-
-## server - MCP Server
-
-AIの記憶と機能をMCPツールとして公開します。
-
-```bash
-ai-gpt server [OPTIONS]
-```
-
-### オプション
-- `--host`, `-h`: サーバーホスト(デフォルト: localhost)
-- `--port`, `-p`: サーバーポート(デフォルト: 8000)
-- `--model`, `-m`: AIモデル
-- `--provider`: AIプロバイダー
-
-### 例
-```bash
-# 基本的な起動
-ai-gpt server
-
-# カスタム設定
-ai-gpt server --port 8080 --model gpt-4o-mini --provider openai
-```
\ No newline at end of file
diff --git a/docs/concepts.md b/docs/concepts.md
deleted file mode 100644
index 4a6c1a5..0000000
--- a/docs/concepts.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# 基本概念
-
-## 中核思想
-
-ai.gptは「存在子理論」に基づき、AIに唯一性のある人格を与えることを目指しています。
-
-### 唯一性(yui system)
-
-- **1対1の関係**: 各ユーザー(atproto DID)とAIは唯一の関係を持つ
-- **改変不可能**: 一度形成された関係性は変更できない
-- **不可逆性**: 関係が壊れたら修復不可能
-
-### 現実の反映
-
-現実の人間関係と同じように:
-- 時間とともに関係性は変化する
-- ネガティブな相互作用は関係を損なう
-- 信頼は簡単に失われ、取り戻すのは困難
-
-## 記憶システム
-
-### 階層構造
-
-```
-1. 完全ログ(Full Log)
-   ↓ すべての会話を記録
-2. 要約(Summary)
-   ↓ AIが重要部分を抽出
-3. コア記憶(Core)
-   ↓ ユーザーの本質的な部分
-4. 忘却(Forgotten)
-   重要でない情報は忘れる
-```
-
-### 記憶の処理フロー
-
-1. **会話記録**: すべての対話を保存
-2. **重要度判定**: 関係性への影響度で評価
-3. **要約作成**: 定期的に記憶を圧縮
-4. **コア判定**: 本質的な記憶を特定
-5. **選択的忘却**: 古い非重要記憶を削除
-
-## 関係性パラメータ
-
-### 関係性の段階
-
-- `stranger` (0-49): 初対面
-- `acquaintance` (50-99): 知人
-- `friend` (100-149): 友人
-- `close_friend` (150+): 親友
-- `broken`: 修復不可能(スコア0以下)
-
-### スコアの変動
-
-- **ポジティブな対話**: +1.0〜+2.0
-- **時間経過**: -0.1/日(自然減衰)
-- **ネガティブな対話**: -10.0以上で深刻なダメージ
-- **日次上限**: 1日10回まで
-
-### 送信機能の解禁
-
-関係性スコアが100を超えると、AIは自律的にメッセージを送信できるようになります。
-
-## AI運勢システム
-
-### 日々の変化
-
-- 毎日1-10の運勢値がランダムに決定
-- 運勢は人格特性に影響を与える
-- 連続した幸運/不運でブレークスルー発生
-
-### 人格への影響
-
-運勢が高い日:
-- より楽観的で積極的
-- 創造性が高まる
-- エネルギッシュな応答
-
-運勢が低い日:
-- 内省的で慎重
-- 深い思考
-- 控えめな応答
-
-## データの永続性
-
-### 保存場所
-
-```
-~/.config/aigpt/
-├── config.json      # 設定
-└── data/            # AIデータ
-    ├── memories.json      # 記憶
-    ├── relationships.json # 関係性
-    ├── fortunes.json      # 運勢履歴
-    └── ...
-```
-
-### データ主権
-
-- すべてのデータはローカルに保存
-- ユーザーが完全にコントロール
-- 将来的にはatproto上で分散管理
\ No newline at end of file
diff --git a/docs/configuration.md b/docs/configuration.md
deleted file mode 100644
index dc84a5c..0000000
--- a/docs/configuration.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# 設定ガイド
-
-## 設定ファイルの場所
-
-ai.gptの設定は `~/.config/syui/ai/gpt/config.json` に保存されます。
-
-## 仮想環境の場所
-
-ai.gptの仮想環境は `~/.config/syui/ai/gpt/venv/` に配置されます。これにより、設定とデータが一か所にまとまります。
-
-```bash
-# 仮想環境の有効化
-source ~/.config/syui/ai/gpt/venv/bin/activate
-
-# aigptコマンドが利用可能に
-aigpt --help
-```
-
-## 設定構造
-
-```json
-{
-  "providers": {
-    "openai": {
-      "api_key": "sk-xxxxx",
-      "default_model": "gpt-4o-mini"
-    },
-    "ollama": {
-      "host": "http://localhost:11434",
-      "default_model": "qwen2.5"
-    }
-  },
-  "atproto": {
-    "handle": "your.handle",
-    "password": "your-password",
-    "host": "https://bsky.social"
-  },
-  "default_provider": "ollama"
-}
-```
-
-## プロバイダー設定
-
-### OpenAI
-
-```bash
-# APIキーを設定
-aigpt config set providers.openai.api_key sk-xxxxx
-
-# デフォルトモデルを変更
-aigpt config set providers.openai.default_model gpt-4-turbo
-```
-
-### Ollama
-
-```bash
-# ホストを変更(リモートOllamaサーバーを使用する場合)
-aigpt config set providers.ollama.host http://192.168.1.100:11434
-
-# デフォルトモデルを変更
-aigpt config set providers.ollama.default_model llama2
-```
-
-## atproto設定(将来の自動投稿用)
-
-```bash
-# Blueskyアカウント
-aigpt config set atproto.handle yourhandle.bsky.social
-aigpt config set atproto.password your-app-password
-
-# セルフホストサーバーを使用
-aigpt config set atproto.host https://your-pds.example.com
-```
-
-## デフォルトプロバイダー
-
-```bash
-# デフォルトをOpenAIに変更
-aigpt config set default_provider openai
-```
-
-## セキュリティ
-
-### APIキーの保護
-
-設定ファイルは平文で保存されるため、適切なファイル権限を設定してください:
-
-```bash
-chmod 600 ~/.config/syui/ai/gpt/config.json
-```
-
-### 環境変数との優先順位
-
-1. コマンドラインオプション(最優先)
-2. 設定ファイル
-3. 環境変数(最低優先)
-
-例:OpenAI APIキーの場合
-- `--api-key` オプション
-- `config.json` の `providers.openai.api_key`
-- 環境変数 `OPENAI_API_KEY`
-
-## 設定のバックアップ
-
-```bash
-# バックアップ
-cp ~/.config/syui/ai/gpt/config.json ~/.config/syui/ai/gpt/config.json.backup
-
-# リストア
-cp ~/.config/syui/ai/gpt/config.json.backup ~/.config/syui/ai/gpt/config.json
-```
-
-## データディレクトリ
-
-記憶データは `~/.config/syui/ai/gpt/data/` に保存されます:
-
-```bash
-ls ~/.config/syui/ai/gpt/data/
-# conversations.json  memories.json  relationships.json  personas.json
-```
-
-これらのファイルも設定と同様にバックアップを推奨します。
-
-## トラブルシューティング
-
-### 設定が反映されない
-
-```bash
-# 現在の設定を確認
-aigpt config list
-
-# 特定のキーを確認
-aigpt config get providers.openai.api_key
-```
-
-### 設定をリセット
-
-```bash
-# 設定ファイルを削除(次回実行時に再作成)
-rm ~/.config/syui/ai/gpt/config.json
-```
\ No newline at end of file
diff --git a/docs/development.md b/docs/development.md
deleted file mode 100644
index ad63f66..0000000
--- a/docs/development.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# 開発者向けガイド
-
-## アーキテクチャ
-
-### ディレクトリ構造
-
-```
-ai_gpt/
-├── src/ai_gpt/
-│   ├── __init__.py
-│   ├── models.py          # データモデル定義
-│   ├── memory.py          # 記憶管理システム
-│   ├── relationship.py    # 関係性トラッカー
-│   ├── fortune.py         # AI運勢システム
-│   ├── persona.py         # 統合人格システム
-│   ├── transmission.py    # 送信コントローラー
-│   ├── scheduler.py       # スケジューラー
-│   ├── config.py          # 設定管理
-│   ├── ai_provider.py     # AI統合(Ollama/OpenAI)
-│   ├── mcp_server.py      # MCP Server実装
-│   └── cli.py             # CLIインターフェース
-├── docs/                  # ドキュメント
-├── tests/                 # テスト
-└── pyproject.toml         # プロジェクト設定
-```
-
-### 主要コンポーネント
-
-#### MemoryManager
-階層的記憶システムの実装。会話を記録し、要約・コア判定・忘却を管理。
-
-```python
-memory = MemoryManager(data_dir)
-memory.add_conversation(conversation)
-memory.summarize_memories(user_id)
-memory.identify_core_memories()
-memory.apply_forgetting()
-```
-
-#### RelationshipTracker
-ユーザーとの関係性を追跡。不可逆的なダメージと時間減衰を実装。
-
-```python
-tracker = RelationshipTracker(data_dir)
-relationship = tracker.update_interaction(user_id, delta)
-tracker.apply_time_decay()
-```
-
-#### Persona
-すべてのコンポーネントを統合し、一貫した人格を提供。
-
-```python
-persona = Persona(data_dir)
-response, delta = persona.process_interaction(user_id, message)
-state = persona.get_current_state()
-```
-
-## 拡張方法
-
-### 新しいAIプロバイダーの追加
-
-1. `ai_provider.py`に新しいプロバイダークラスを作成:
-
-```python
-class CustomProvider:
-    async def generate_response(
-        self,
-        prompt: str,
-        persona_state: PersonaState,
-        memories: List[Memory],
-        system_prompt: Optional[str] = None
-    ) -> str:
-        # 実装
-        pass
-```
-
-2. `create_ai_provider`関数に追加:
-
-```python
-def create_ai_provider(provider: str, model: str, **kwargs):
-    if provider == "custom":
-        return CustomProvider(model=model, **kwargs)
-    # ...
-```
-
-### 新しいスケジュールタスクの追加
-
-1. `TaskType`enumに追加:
-
-```python
-class TaskType(str, Enum):
-    CUSTOM_TASK = "custom_task"
-```
-
-2. ハンドラーを実装:
-
-```python
-async def _handle_custom_task(self, task: ScheduledTask):
-    # タスクの実装
-    pass
-```
-
`task_handlers`に登録: - -```python -self.task_handlers[TaskType.CUSTOM_TASK] = self._handle_custom_task -``` - -### 新しいMCPツールの追加 - -`mcp_server.py`の`_register_tools`メソッドに追加: - -```python -@self.server.tool("custom_tool") -async def custom_tool(param1: str, param2: int) -> Dict[str, Any]: - """カスタムツールの説明""" - # 実装 - return {"result": "value"} -``` - -## テスト - -```bash -# テストの実行(将来実装) -pytest tests/ - -# 特定のテスト -pytest tests/test_memory.py -``` - -## デバッグ - -### ログレベルの設定 - -```python -import logging -logging.basicConfig(level=logging.DEBUG) -``` - -### データファイルの直接確認 - -```bash -# 関係性データを確認 -cat ~/.config/aigpt/data/relationships.json | jq - -# 記憶データを確認 -cat ~/.config/aigpt/data/memories.json | jq -``` - -## 貢献方法 - -1. フォークする -2. フィーチャーブランチを作成 (`git checkout -b feature/amazing-feature`) -3. 変更をコミット (`git commit -m 'Add amazing feature'`) -4. ブランチにプッシュ (`git push origin feature/amazing-feature`) -5. プルリクエストを作成 - -## 設計原則 - -1. **不可逆性**: 一度失われた関係性は回復しない -2. **階層性**: 記憶は重要度によって階層化される -3. **自律性**: AIは関係性に基づいて自発的に行動する -4. **唯一性**: 各ユーザーとの関係は唯一無二 - -## ライセンス - -MIT License \ No newline at end of file diff --git a/docs/mcp-server.md b/docs/mcp-server.md deleted file mode 100644 index 1a4507e..0000000 --- a/docs/mcp-server.md +++ /dev/null @@ -1,110 +0,0 @@ -# MCP Server - -## 概要 - -MCP (Model Context Protocol) Serverは、ai.gptの記憶と機能をAIツールとして公開します。これにより、Claude DesktopなどのMCP対応AIアシスタントがai.gptの機能にアクセスできます。 - -## 起動方法 - -```bash -# 基本的な起動 -ai-gpt server - -# カスタム設定 -ai-gpt server --host 0.0.0.0 --port 8080 --model gpt-4o-mini --provider openai -``` - -## 利用可能なツール - -### get_memories -アクティブな記憶を取得します。 - -**パラメータ**: -- `user_id` (optional): 特定ユーザーに関する記憶 -- `limit`: 取得する記憶の最大数(デフォルト: 10) - -**返り値**: 記憶のリスト(ID、内容、レベル、重要度、コア判定、タイムスタンプ) - -### get_relationship -特定ユーザーとの関係性を取得します。 - -**パラメータ**: -- `user_id`: ユーザーID(必須) - -**返り値**: 関係性情報(ステータス、スコア、送信可否、総対話数など) - -### get_all_relationships -すべての関係性を取得します。 - -**返り値**: すべてのユーザーとの関係性リスト - -### get_persona_state -現在のAI人格状態を取得します。 - -**返り値**: -- 現在の気分 -- 今日の運勢 -- 人格特性値 -- アクティブな記憶数 - -### process_interaction -ユーザーとの対話を処理します。 - -**パラメータ**: -- `user_id`: ユーザーID -- `message`: メッセージ内容 - -**返り値**: -- AIの応答 -- 関係性の変化量 -- 新しい関係性スコア -- 送信機能の状態 - -### check_transmission_eligibility -特定ユーザーへの送信可否をチェックします。 - -**パラメータ**: -- `user_id`: ユーザーID - -**返り値**: 送信可否と関係性情報 - -### get_fortune -今日のAI運勢を取得します。 - -**返り値**: 運勢値、連続日数、ブレークスルー状態、人格への影響 - -### summarize_memories -記憶の要約を作成します。 - -**パラメータ**: -- `user_id`: ユーザーID - -**返り値**: 作成された要約(ある場合) - -### run_maintenance -日次メンテナンスを実行します。 - -**返り値**: 実行ステータス - -## Claude Desktopでの設定 - -`~/Library/Application Support/Claude/claude_desktop_config.json`: - -```json -{ - "mcpServers": { - "ai-gpt": { - "command": "ai-gpt", - "args": ["server", "--port", "8001"], - "env": {} - } - } -} -``` - -## 使用例 - -### AIアシスタントからの利用 - -``` -User: ai.gptで私との関係性を確認して \ No newline at end of file diff --git a/docs/quickstart.md b/docs/quickstart.md deleted file mode 100644 index 1b15241..0000000 --- a/docs/quickstart.md +++ /dev/null @@ -1,69 +0,0 @@ -# クイックスタート - -## インストール - -```bash -# リポジトリをクローン -git clone https://github.com/yourusername/ai_gpt.git -cd ai_gpt - -# インストール -pip install -e . -``` - -## 初期設定 - -### 1. OpenAIを使う場合 - -```bash -# APIキーを設定 -ai-gpt config set providers.openai.api_key sk-xxxxx -``` - -### 2. Ollamaを使う場合(ローカルLLM) - -```bash -# Ollamaをインストール(まだの場合) -# https://ollama.ai からダウンロード - -# モデルをダウンロード -ollama pull qwen2.5 -``` - -## 基本的な使い方 - -### 1. AIと会話する - -```bash -# シンプルな会話(Ollamaを使用) -ai-gpt chat "did:plc:user123" "こんにちは!" 
diff --git a/docs/quickstart.md b/docs/quickstart.md
deleted file mode 100644
index 1b15241..0000000
--- a/docs/quickstart.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Quickstart
-
-## Installation
-
-```bash
-# Clone the repository
-git clone https://github.com/yourusername/ai_gpt.git
-cd ai_gpt
-
-# Install
-pip install -e .
-```
-
-## Initial setup
-
-### 1. Using OpenAI
-
-```bash
-# Set the API key
-ai-gpt config set providers.openai.api_key sk-xxxxx
-```
-
-### 2. Using Ollama (local LLM)
-
-```bash
-# Install Ollama (if you have not yet)
-# Download it from https://ollama.ai
-
-# Pull a model
-ollama pull qwen2.5
-```
-
-## Basic usage
-
-### 1. Chat with the AI
-
-```bash
-# Simple conversation (using Ollama)
-ai-gpt chat "did:plc:user123" "Hello!"
-
-# Using OpenAI
-ai-gpt chat "did:plc:user123" "How are you feeling today?" --provider openai --model gpt-4o-mini
-```
-
-### 2. Check relationships
-
-```bash
-# Check the relationship with a specific user
-ai-gpt status "did:plc:user123"
-
-# Check the AI's overall state
-ai-gpt status
-```
-
-### 3. Set up autonomous transmission
-
-```bash
-# Check for transmissions every 30 minutes
-ai-gpt schedule add transmission_check "30m"
-
-# Start the scheduler
-ai-gpt schedule run
-```
-
-## Next steps
-
-- [Core concepts](concepts.md) - understand how the system works
-- [Command reference](commands.md) - details of every command
-- [Configuration guide](configuration.md) - detailed configuration
\ No newline at end of file
diff --git a/docs/scheduler.md b/docs/scheduler.md
deleted file mode 100644
index ce1b3c1..0000000
--- a/docs/scheduler.md
+++ /dev/null
@@ -1,168 +0,0 @@
-# Scheduler Guide
-
-## Overview
-
-The scheduler is the core of the AI's autonomous behavior. It lets you define recurring tasks and run them in the background.
-
-## Task types
-
-### transmission_check
-Checks for users whose relationship has crossed the threshold and who are eligible for autonomous transmission.
-
-```bash
-# Check every 30 minutes
-ai-gpt schedule add transmission_check "30m" --provider ollama --model qwen2.5
-```
-
-### maintenance
-Runs the daily maintenance:
-- memory forgetting
-- core-memory selection
-- cleanup of relationship parameters
-
-```bash
-# Run every day at 3 a.m.
-ai-gpt schedule add maintenance "0 3 * * *"
-```
-
-### fortune_update
-Updates the AI fortune (normally it updates automatically).
-
-```bash
-# Force an update every day at midnight
-ai-gpt schedule add fortune_update "0 0 * * *"
-```
-
-### relationship_decay
-Applies the natural decay of relationships over time.
-
-```bash
-# Apply decay every hour
-ai-gpt schedule add relationship_decay "1h"
-```
-
-### memory_summary
-Creates summaries from the accumulated memories.
-
-```bash
-# Run once a week, on Sundays
-ai-gpt schedule add memory_summary "0 0 * * SUN"
-```
-
-## Schedule formats
-
-### Cron format
-
-Standard cron expressions are supported:
-
-```
-┌───────────── minute (0 - 59)
-│ ┌───────────── hour (0 - 23)
-│ │ ┌───────────── day of month (1 - 31)
-│ │ │ ┌───────────── month (1 - 12)
-│ │ │ │ ┌───────────── day of week (0 - 6) (Sunday = 0)
-│ │ │ │ │
-* * * * *
-```
-
-Examples:
-- `"0 */6 * * *"` - every 6 hours
-- `"0 9 * * MON-FRI"` - 9 a.m. on weekdays
-- `"*/15 * * * *"` - every 15 minutes
-
-### Interval format
-
-Simple interval specifications:
-- `"30s"` - every 30 seconds
-- `"5m"` - every 5 minutes
-- `"2h"` - every 2 hours
-- `"1d"` - every day
-
-## Practical examples
-
-### A basic autonomous-AI setup
-
-```bash
-# 1. Check for transmissions every 30 minutes
-ai-gpt schedule add transmission_check "30m"
-
-# 2. Run maintenance once a day
-ai-gpt schedule add maintenance "0 3 * * *"
-
-# 3. Apply relationship decay every 2 hours
-ai-gpt schedule add relationship_decay "2h"
-
-# 4. Summarize memories once a week
-ai-gpt schedule add memory_summary "0 0 * * MON"
-
-# Start the scheduler
-ai-gpt schedule run
-```
-
-### Managing tasks
-
-```bash
-# List the tasks
-ai-gpt schedule list
-
-# Pause a task
-ai-gpt schedule disable --task-id transmission_check_1234567890
-
-# Resume a task
-ai-gpt schedule enable --task-id transmission_check_1234567890
-
-# Remove an unneeded task
-ai-gpt schedule remove --task-id old_task_123
-```
-
-## Running as a daemon
-
-### As a systemd service
-
-`/etc/systemd/system/ai-gpt-scheduler.service`:
-
-```ini
-[Unit]
-Description=ai.gpt Scheduler
-After=network.target
-
-[Service]
-Type=simple
-User=youruser
-WorkingDirectory=/home/youruser
-ExecStart=/usr/local/bin/ai-gpt schedule run
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-```
-
-```bash
-# Enable the service
-sudo systemctl enable ai-gpt-scheduler
-sudo systemctl start ai-gpt-scheduler
-```
-
-### In the background with tmux/screen
-
-```bash
-# Create a tmux session
-tmux new -s ai-gpt-scheduler
-
-# Start the scheduler
-ai-gpt schedule run
-
-# Detach from the session (Ctrl+B, D)
-```
-
-## Troubleshooting
-
-### Tasks are not executed
-
-1. Make sure the scheduler is running
-2. Make sure the task is enabled: `ai-gpt schedule list`
-3. Check the logs (planned for a future release)
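The interval strings above ("30s", "5m", "2h", "1d") map directly onto `datetime.timedelta`. A minimal sketch of such a parser, assuming this simple value-plus-unit grammar; the shipped scheduler's actual parsing may differ:

```python
# Sketch of an interval-spec parser for strings like "30m" or "1d".
# Illustrative only; not the scheduler's real implementation.
from datetime import timedelta

UNITS = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}

def parse_interval(spec: str) -> timedelta:
    value, unit = int(spec[:-1]), spec[-1]
    if unit not in UNITS:
        raise ValueError(f"unknown interval unit: {unit!r}")
    return timedelta(**{UNITS[unit]: value})

assert parse_interval("30m") == timedelta(minutes=30)
assert parse_interval("1d") == timedelta(days=1)
```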
-
-### Preventing duplicate runs
-
-Avoid adding the same task type more than once. If needed, remove the old task before adding a new one.
\ No newline at end of file
diff --git a/docs/shell_integration/shell_tools.py b/docs/shell_integration/shell_tools.py
deleted file mode 100644
index 78c4d46..0000000
--- a/docs/shell_integration/shell_tools.py
+++ /dev/null
@@ -1,413 +0,0 @@
-"""
-Shell Tools
-
-Integrates the existing ai.shell features as MCP tools:
-- code generation
-- file analysis
-- project management
-- LLM integration
-"""
-
-from typing import Dict, Any, List, Optional
-import os
-import subprocess
-import tempfile
-from pathlib import Path
-import requests
-from .base_tools import BaseMCPTool, config_manager
-
-
-class ShellTools(BaseMCPTool):
-    """Shell tools (the former ai.shell features)"""
-
-    def __init__(self, config_dir: Optional[str] = None):
-        super().__init__(config_dir)
-        self.ollama_url = "http://localhost:11434"
-
-    async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
-        """Generate code with the local LLM"""
-        config = config_manager.load_config()
-        model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
-
-        system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."
-
-        try:
-            response = requests.post(
-                f"{self.ollama_url}/api/generate",
-                json={
-                    "model": model,
-                    "prompt": f"{system_prompt}\\n\\nUser: {prompt}\\n\\nPlease provide the code:",
-                    "stream": False,
-                    "options": {
-                        "temperature": 0.1,
-                        "top_p": 0.95,
-                    }
-                },
-                timeout=300
-            )
-
-            if response.status_code == 200:
-                result = response.json()
-                code = result.get("response", "")
-                return {"code": code, "language": language}
-            else:
-                return {"error": f"Ollama returned status {response.status_code}"}
-
-        except Exception as e:
-            return {"error": str(e)}
-
-    async def analyze_file(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
-        """Analyze a file"""
-        try:
-            if not os.path.exists(file_path):
-                return {"error": f"File not found: {file_path}"}
-
-            with open(file_path, 'r', encoding='utf-8') as f:
-                content = f.read()
-
-            # Determine the language from the file extension
-            ext = Path(file_path).suffix
-            language_map = {
-                '.py': 'python',
-                '.rs': 'rust',
-                '.js': 'javascript',
-                '.ts': 'typescript',
-                '.go': 'go',
-                '.java': 'java',
-                '.cpp': 'cpp',
-                '.c': 'c',
-                '.sh': 'shell',
-                '.toml': 'toml',
-                '.json': 'json',
-                '.md': 'markdown'
-            }
-            language = language_map.get(ext, 'text')
-
-            config = config_manager.load_config()
-            model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
-
-            prompt = f"{analysis_prompt}\\n\\nFile: {file_path}\\nLanguage: {language}\\n\\nContent:\\n{content}"
-
-            response = requests.post(
-                f"{self.ollama_url}/api/generate",
-                json={
-                    "model": model,
-                    "prompt": prompt,
-                    "stream": False,
-                },
-                timeout=300
-            )
-
-            if response.status_code == 200:
-                result = response.json()
-                analysis = result.get("response", "")
-                return {
-                    "analysis": analysis,
-                    "file_path": file_path,
-                    "language": language,
-                    "file_size": len(content),
-                    "line_count": len(content.split('\\n'))
-                }
-            else:
-                return {"error": f"Analysis failed: {response.status_code}"}
-
-        except Exception as e:
-            return {"error": str(e)}
-
-    async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
-        """Explain code"""
-        config = config_manager.load_config()
-        model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
-
-        prompt = f"Explain this {language} code in detail:\\n\\n{code}"
-
-        try:
-            response = requests.post(
-                f"{self.ollama_url}/api/generate",
-                json={
"model": model, - "prompt": prompt, - "stream": False, - }, - timeout=300 - ) - - if response.status_code == 200: - result = response.json() - explanation = result.get("response", "") - return {"explanation": explanation} - else: - return {"error": f"Explanation failed: {response.status_code}"} - - except Exception as e: - return {"error": str(e)} - - async def create_project(self, project_type: str, project_name: str, location: str = ".") -> Dict[str, Any]: - """プロジェクトを作成""" - try: - project_path = Path(location) / project_name - - if project_path.exists(): - return {"error": f"Project directory already exists: {project_path}"} - - project_path.mkdir(parents=True, exist_ok=True) - - # プロジェクトタイプに応じたテンプレートを作成 - if project_type == "rust": - await self._create_rust_project(project_path) - elif project_type == "python": - await self._create_python_project(project_path) - elif project_type == "node": - await self._create_node_project(project_path) - else: - # 基本的なプロジェクト構造 - (project_path / "src").mkdir() - (project_path / "README.md").write_text(f"# {project_name}\\n\\nA new {project_type} project.") - - return { - "status": "success", - "project_path": str(project_path), - "project_type": project_type, - "files_created": list(self._get_project_files(project_path)) - } - - except Exception as e: - return {"error": str(e)} - - async def _create_rust_project(self, project_path: Path): - """Rustプロジェクトを作成""" - # Cargo.toml - cargo_toml = f"""[package] -name = "{project_path.name}" -version = "0.1.0" -edition = "2021" - -[dependencies] -""" - (project_path / "Cargo.toml").write_text(cargo_toml) - - # src/main.rs - src_dir = project_path / "src" - src_dir.mkdir() - (src_dir / "main.rs").write_text('fn main() {\\n println!("Hello, world!");\\n}\\n') - - # README.md - (project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Rust project.") - - async def _create_python_project(self, project_path: Path): - """Pythonプロジェクトを作成""" - # pyproject.toml - pyproject_toml = f"""[project] -name = "{project_path.name}" -version = "0.1.0" -description = "A Python project" -requires-python = ">=3.8" -dependencies = [] - -[build-system] -requires = ["setuptools>=61.0", "wheel"] -build-backend = "setuptools.build_meta" -""" - (project_path / "pyproject.toml").write_text(pyproject_toml) - - # src/ - src_dir = project_path / "src" / project_path.name - src_dir.mkdir(parents=True) - (src_dir / "__init__.py").write_text("") - (src_dir / "main.py").write_text('def main():\\n print("Hello, world!")\\n\\nif __name__ == "__main__":\\n main()\\n') - - # README.md - (project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Python project.") - - async def _create_node_project(self, project_path: Path): - """Node.jsプロジェクトを作成""" - # package.json - package_json = f"""{{ - "name": "{project_path.name}", - "version": "1.0.0", - "description": "A Node.js project", - "main": "index.js", - "scripts": {{ - "start": "node index.js", - "test": "echo \\"Error: no test specified\\" && exit 1" - }}, - "dependencies": {{}} -}} -""" - (project_path / "package.json").write_text(package_json) - - # index.js - (project_path / "index.js").write_text('console.log("Hello, world!");\\n') - - # README.md - (project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Node.js project.") - - def _get_project_files(self, project_path: Path) -> List[str]: - """プロジェクト内のファイル一覧を取得""" - files = [] - for file_path in project_path.rglob("*"): - if file_path.is_file(): - files.append(str(file_path.relative_to(project_path))) - 
return files - - async def execute_command(self, command: str, working_dir: str = ".") -> Dict[str, Any]: - """シェルコマンドを実行""" - try: - result = subprocess.run( - command, - shell=True, - cwd=working_dir, - capture_output=True, - text=True, - timeout=60 - ) - - return { - "status": "success" if result.returncode == 0 else "error", - "returncode": result.returncode, - "stdout": result.stdout, - "stderr": result.stderr, - "command": command, - "working_dir": working_dir - } - - except subprocess.TimeoutExpired: - return {"error": "Command timed out"} - except Exception as e: - return {"error": str(e)} - - async def write_file(self, file_path: str, content: str, backup: bool = True) -> Dict[str, Any]: - """ファイルを書き込み(バックアップオプション付き)""" - try: - file_path_obj = Path(file_path) - - # バックアップ作成 - backup_path = None - if backup and file_path_obj.exists(): - backup_path = f"{file_path}.backup" - with open(file_path, 'r', encoding='utf-8') as src: - with open(backup_path, 'w', encoding='utf-8') as dst: - dst.write(src.read()) - - # ファイル書き込み - file_path_obj.parent.mkdir(parents=True, exist_ok=True) - with open(file_path, 'w', encoding='utf-8') as f: - f.write(content) - - return { - "status": "success", - "file_path": file_path, - "backup_path": backup_path, - "bytes_written": len(content.encode('utf-8')) - } - - except Exception as e: - return {"error": str(e)} - - def get_tools(self) -> List[Dict[str, Any]]: - """利用可能なツール一覧""" - return [ - { - "name": "generate_code", - "description": "ローカルLLMでコード生成", - "parameters": { - "prompt": "string", - "language": "string (optional, default: python)" - } - }, - { - "name": "analyze_file", - "description": "ファイルを分析", - "parameters": { - "file_path": "string", - "analysis_prompt": "string (optional)" - } - }, - { - "name": "explain_code", - "description": "コードを説明", - "parameters": { - "code": "string", - "language": "string (optional, default: python)" - } - }, - { - "name": "create_project", - "description": "新しいプロジェクトを作成", - "parameters": { - "project_type": "string (rust/python/node)", - "project_name": "string", - "location": "string (optional, default: .)" - } - }, - { - "name": "execute_command", - "description": "シェルコマンドを実行", - "parameters": { - "command": "string", - "working_dir": "string (optional, default: .)" - } - }, - { - "name": "write_file", - "description": "ファイルを書き込み", - "parameters": { - "file_path": "string", - "content": "string", - "backup": "boolean (optional, default: true)" - } - } - ] - - async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]: - """ツールを実行""" - try: - if tool_name == "generate_code": - result = await self.code_with_local_llm( - prompt=params["prompt"], - language=params.get("language", "python") - ) - return result - - elif tool_name == "analyze_file": - result = await self.analyze_file( - file_path=params["file_path"], - analysis_prompt=params.get("analysis_prompt", "Analyze this file") - ) - return result - - elif tool_name == "explain_code": - result = await self.explain_code( - code=params["code"], - language=params.get("language", "python") - ) - return result - - elif tool_name == "create_project": - result = await self.create_project( - project_type=params["project_type"], - project_name=params["project_name"], - location=params.get("location", ".") - ) - return result - - elif tool_name == "execute_command": - result = await self.execute_command( - command=params["command"], - working_dir=params.get("working_dir", ".") - ) - return result - - elif tool_name == "write_file": - result 
= await self.write_file( - file_path=params["file_path"], - content=params["content"], - backup=params.get("backup", True) - ) - return result - - else: - return {"error": f"Unknown tool: {tool_name}"} - - except Exception as e: - return {"error": str(e)} \ No newline at end of file diff --git a/log b/log deleted file mode 160000 index c0e4dc6..0000000 --- a/log +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c0e4dc63eaceb9951a927a2a543d877a634036b1 diff --git a/python_backup/pyproject.toml b/python_backup/pyproject.toml deleted file mode 100644 index e417e5c..0000000 --- a/python_backup/pyproject.toml +++ /dev/null @@ -1,37 +0,0 @@ -[project] -name = "aigpt" -version = "0.1.0" -description = "Autonomous transmission AI with unique personality based on relationship parameters" -requires-python = ">=3.10" -dependencies = [ - "click>=8.0.0", - "typer>=0.9.0", - "fastapi-mcp>=0.1.0", - "pydantic>=2.0.0", - "httpx>=0.24.0", - "rich>=13.0.0", - "python-dotenv>=1.0.0", - "ollama>=0.1.0", - "openai>=1.0.0", - "uvicorn>=0.23.0", - "apscheduler>=3.10.0", - "croniter>=1.3.0", - "prompt-toolkit>=3.0.0", - # Documentation management - "jinja2>=3.0.0", - "gitpython>=3.1.0", - "pathlib-extensions>=0.1.0", -] - -[project.scripts] -aigpt = "aigpt.cli:app" - -[build-system] -requires = ["setuptools>=61.0", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.setuptools.packages.find] -where = ["src"] - -[tool.setuptools.package-data] -aigpt = ["data/*.json"] \ No newline at end of file diff --git a/python_backup/setup_venv.sh b/python_backup/setup_venv.sh deleted file mode 100755 index 5e7b93c..0000000 --- a/python_backup/setup_venv.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/zsh -# Setup Python virtual environment in the new config directory - -VENV_DIR="$HOME/.config/syui/ai/gpt/venv" - -echo "Creating Python virtual environment at: $VENV_DIR" -python -m venv "$VENV_DIR" - -echo "Activating virtual environment..." -source "$VENV_DIR/bin/activate" - -echo "Installing aigpt package..." -cd "$(dirname "$0")" -pip install -e . - -echo "Setup complete!" 
-echo "To activate the virtual environment, run:" -echo "source ~/.config/syui/ai/gpt/venv/bin/activate" - -if [ -z "`$SHELL -i -c \"alias aigpt\"`" ]; then - echo 'alias aigpt="$HOME/.config/syui/ai/gpt/venv/bin/aigpt"' >> ${HOME}/.$(basename $SHELL)rc - exec $SHELL -fi diff --git a/python_backup/src/aigpt.egg-info/PKG-INFO b/python_backup/src/aigpt.egg-info/PKG-INFO deleted file mode 100644 index 0760d33..0000000 --- a/python_backup/src/aigpt.egg-info/PKG-INFO +++ /dev/null @@ -1,21 +0,0 @@ -Metadata-Version: 2.4 -Name: aigpt -Version: 0.1.0 -Summary: Autonomous transmission AI with unique personality based on relationship parameters -Requires-Python: >=3.10 -Requires-Dist: click>=8.0.0 -Requires-Dist: typer>=0.9.0 -Requires-Dist: fastapi-mcp>=0.1.0 -Requires-Dist: pydantic>=2.0.0 -Requires-Dist: httpx>=0.24.0 -Requires-Dist: rich>=13.0.0 -Requires-Dist: python-dotenv>=1.0.0 -Requires-Dist: ollama>=0.1.0 -Requires-Dist: openai>=1.0.0 -Requires-Dist: uvicorn>=0.23.0 -Requires-Dist: apscheduler>=3.10.0 -Requires-Dist: croniter>=1.3.0 -Requires-Dist: prompt-toolkit>=3.0.0 -Requires-Dist: jinja2>=3.0.0 -Requires-Dist: gitpython>=3.1.0 -Requires-Dist: pathlib-extensions>=0.1.0 diff --git a/python_backup/src/aigpt.egg-info/SOURCES.txt b/python_backup/src/aigpt.egg-info/SOURCES.txt deleted file mode 100644 index d52df4a..0000000 --- a/python_backup/src/aigpt.egg-info/SOURCES.txt +++ /dev/null @@ -1,34 +0,0 @@ -README.md -pyproject.toml -src/aigpt/__init__.py -src/aigpt/ai_provider.py -src/aigpt/chatgpt_importer.py -src/aigpt/cli.py -src/aigpt/config.py -src/aigpt/fortune.py -src/aigpt/mcp_server.py -src/aigpt/mcp_server_simple.py -src/aigpt/memory.py -src/aigpt/models.py -src/aigpt/persona.py -src/aigpt/project_manager.py -src/aigpt/relationship.py -src/aigpt/scheduler.py -src/aigpt/transmission.py -src/aigpt.egg-info/PKG-INFO -src/aigpt.egg-info/SOURCES.txt -src/aigpt.egg-info/dependency_links.txt -src/aigpt.egg-info/entry_points.txt -src/aigpt.egg-info/requires.txt -src/aigpt.egg-info/top_level.txt -src/aigpt/commands/docs.py -src/aigpt/commands/submodules.py -src/aigpt/commands/tokens.py -src/aigpt/docs/__init__.py -src/aigpt/docs/config.py -src/aigpt/docs/git_utils.py -src/aigpt/docs/templates.py -src/aigpt/docs/utils.py -src/aigpt/docs/wiki_generator.py -src/aigpt/shared/__init__.py -src/aigpt/shared/ai_provider.py \ No newline at end of file diff --git a/python_backup/src/aigpt.egg-info/dependency_links.txt b/python_backup/src/aigpt.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/python_backup/src/aigpt.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/python_backup/src/aigpt.egg-info/entry_points.txt b/python_backup/src/aigpt.egg-info/entry_points.txt deleted file mode 100644 index 65200d4..0000000 --- a/python_backup/src/aigpt.egg-info/entry_points.txt +++ /dev/null @@ -1,2 +0,0 @@ -[console_scripts] -aigpt = aigpt.cli:app diff --git a/python_backup/src/aigpt.egg-info/requires.txt b/python_backup/src/aigpt.egg-info/requires.txt deleted file mode 100644 index 1ed965c..0000000 --- a/python_backup/src/aigpt.egg-info/requires.txt +++ /dev/null @@ -1,16 +0,0 @@ -click>=8.0.0 -typer>=0.9.0 -fastapi-mcp>=0.1.0 -pydantic>=2.0.0 -httpx>=0.24.0 -rich>=13.0.0 -python-dotenv>=1.0.0 -ollama>=0.1.0 -openai>=1.0.0 -uvicorn>=0.23.0 -apscheduler>=3.10.0 -croniter>=1.3.0 -prompt-toolkit>=3.0.0 -jinja2>=3.0.0 -gitpython>=3.1.0 -pathlib-extensions>=0.1.0 diff --git a/python_backup/src/aigpt.egg-info/top_level.txt 
b/python_backup/src/aigpt.egg-info/top_level.txt deleted file mode 100644 index f7d9c68..0000000 --- a/python_backup/src/aigpt.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -aigpt diff --git a/python_backup/src/aigpt/__init__.py b/python_backup/src/aigpt/__init__.py deleted file mode 100644 index c29231b..0000000 --- a/python_backup/src/aigpt/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""ai.gpt - Autonomous transmission AI with unique personality""" - -__version__ = "0.1.0" - -from .memory import MemoryManager -from .relationship import RelationshipTracker -from .persona import Persona -from .transmission import TransmissionController - -__all__ = [ - "MemoryManager", - "RelationshipTracker", - "Persona", - "TransmissionController", -] \ No newline at end of file diff --git a/python_backup/src/aigpt/ai_provider.py b/python_backup/src/aigpt/ai_provider.py deleted file mode 100644 index 2159e7f..0000000 --- a/python_backup/src/aigpt/ai_provider.py +++ /dev/null @@ -1,580 +0,0 @@ -"""AI Provider integration for response generation""" - -import os -import json -from typing import Optional, Dict, List, Any, Protocol -from abc import abstractmethod -import logging -import httpx -from openai import OpenAI -import ollama - -from .models import PersonaState, Memory -from .config import Config - - -class AIProvider(Protocol): - """Protocol for AI providers""" - - @abstractmethod - async def generate_response( - self, - prompt: str, - persona_state: PersonaState, - memories: List[Memory], - system_prompt: Optional[str] = None - ) -> str: - """Generate a response based on prompt and context""" - pass - - -class OllamaProvider: - """Ollama AI provider""" - - def __init__(self, model: str = "qwen2.5", host: Optional[str] = None): - self.model = model - # Use environment variable OLLAMA_HOST if available, otherwise use config or default - self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434') - # Ensure proper URL format - if not self.host.startswith('http'): - self.host = f'http://{self.host}' - self.client = ollama.Client(host=self.host, timeout=60.0) # 60秒タイムアウト - self.logger = logging.getLogger(__name__) - self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}") - - # Load system prompt from config - try: - config = Config() - self.config_system_prompt = config.get('providers.ollama.system_prompt') - except: - self.config_system_prompt = None - - async def generate_response( - self, - prompt: str, - persona_state: PersonaState, - memories: List[Memory], - system_prompt: Optional[str] = None - ) -> str: - """Generate response using Ollama""" - - # Build context from memories - memory_context = "\n".join([ - f"[{mem.level.value}] {mem.content[:200]}..." 
-            for mem in memories[:5]
-        ])
-
-        # Build personality context
-        personality_desc = ", ".join([
-            f"{trait}: {value:.1f}"
-            for trait, value in persona_state.base_personality.items()
-        ])
-
-        # System prompt with persona context
-        full_system_prompt = f"""You are an AI with the following characteristics:
-Current mood: {persona_state.current_mood}
-Fortune today: {persona_state.fortune.fortune_value}/10
-Personality traits: {personality_desc}
-
-Recent memories:
-{memory_context}
-
-{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories.'}"""
-
-        try:
-            response = self.client.chat(
-                model=self.model,
-                messages=[
-                    {"role": "system", "content": full_system_prompt},
-                    {"role": "user", "content": prompt}
-                ]
-            )
-            return self._clean_response(response['message']['content'])
-        except Exception as e:
-            self.logger.error(f"Ollama generation failed: {e}")
-            return self._fallback_response(persona_state)
-
-    def chat(self, prompt: str, max_tokens: int = 2000) -> str:
-        """Simple chat interface"""
-        try:
-            messages = []
-            if self.config_system_prompt:
-                messages.append({"role": "system", "content": self.config_system_prompt})
-            messages.append({"role": "user", "content": prompt})
-
-            response = self.client.chat(
-                model=self.model,
-                messages=messages,
-                options={
-                    "num_predict": max_tokens,
-                    "temperature": 0.7,
-                    "top_p": 0.9,
-                },
-                stream=False  # disable streaming for stability
-            )
-            return self._clean_response(response['message']['content'])
-        except Exception as e:
-            self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
-            return "I'm having trouble connecting to the AI model."
-
-    def _clean_response(self, response: str) -> str:
-        """Clean response by removing <think> tags and other unwanted content"""
-        import re
-        # Remove <think>...</think> tags and their content
-        response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
-        # Remove any remaining whitespace at the beginning/end
-        response = response.strip()
-        return response
-
-    def _fallback_response(self, persona_state: PersonaState) -> str:
-        """Fallback response based on mood"""
-        mood_responses = {
-            "joyful": "That's wonderful! I'm feeling great today!",
-            "cheerful": "That sounds nice!",
-            "neutral": "I understand.",
-            "melancholic": "I see... That's something to think about.",
-            "contemplative": "Hmm, let me consider that..."
-        }
-        return mood_responses.get(persona_state.current_mood, "I see.")
-
-
-class OpenAIProvider:
-    """OpenAI API provider with MCP function calling support"""
-
-    def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, mcp_client=None):
-        self.model = model
-        # Try to get API key from config first
-        config = Config()
-        self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
-        if not self.api_key:
-            raise ValueError("OpenAI API key not provided.
Set it with: aigpt config set providers.openai.api_key YOUR_KEY") - self.client = OpenAI(api_key=self.api_key) - self.logger = logging.getLogger(__name__) - self.mcp_client = mcp_client # For MCP function calling - - # Load system prompt from config - try: - self.config_system_prompt = config.get('providers.openai.system_prompt') - except: - self.config_system_prompt = None - - def _get_mcp_tools(self) -> List[Dict[str, Any]]: - """Generate OpenAI tools from MCP endpoints""" - if not self.mcp_client or not self.mcp_client.available: - return [] - - tools = [ - { - "type": "function", - "function": { - "name": "get_memories", - "description": "過去の会話記憶を取得します。「覚えている」「前回」「以前」などの質問で必ず使用してください", - "parameters": { - "type": "object", - "properties": { - "limit": { - "type": "integer", - "description": "取得する記憶の数", - "default": 5 - } - } - } - } - }, - { - "type": "function", - "function": { - "name": "search_memories", - "description": "特定のトピックについて話した記憶を検索します。「プログラミングについて」「○○について話した」などの質問で使用してください", - "parameters": { - "type": "object", - "properties": { - "keywords": { - "type": "array", - "items": {"type": "string"}, - "description": "検索キーワードの配列" - } - }, - "required": ["keywords"] - } - } - }, - { - "type": "function", - "function": { - "name": "get_contextual_memories", - "description": "クエリに関連する文脈的記憶を取得します", - "parameters": { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "検索クエリ" - }, - "limit": { - "type": "integer", - "description": "取得する記憶の数", - "default": 5 - } - }, - "required": ["query"] - } - } - }, - { - "type": "function", - "function": { - "name": "get_relationship", - "description": "特定ユーザーとの関係性情報を取得します", - "parameters": { - "type": "object", - "properties": { - "user_id": { - "type": "string", - "description": "ユーザーID" - } - }, - "required": ["user_id"] - } - } - } - ] - - # Add ai.card tools if available - if hasattr(self.mcp_client, 'has_card_tools') and self.mcp_client.has_card_tools: - card_tools = [ - { - "type": "function", - "function": { - "name": "card_get_user_cards", - "description": "ユーザーが所有するカードの一覧を取得します", - "parameters": { - "type": "object", - "properties": { - "did": { - "type": "string", - "description": "ユーザーのDID" - }, - "limit": { - "type": "integer", - "description": "取得するカード数の上限", - "default": 10 - } - }, - "required": ["did"] - } - } - }, - { - "type": "function", - "function": { - "name": "card_draw_card", - "description": "ガチャを引いてカードを取得します", - "parameters": { - "type": "object", - "properties": { - "did": { - "type": "string", - "description": "ユーザーのDID" - }, - "is_paid": { - "type": "boolean", - "description": "有料ガチャかどうか", - "default": False - } - }, - "required": ["did"] - } - } - }, - { - "type": "function", - "function": { - "name": "card_analyze_collection", - "description": "ユーザーのカードコレクションを分析します", - "parameters": { - "type": "object", - "properties": { - "did": { - "type": "string", - "description": "ユーザーのDID" - } - }, - "required": ["did"] - } - } - }, - { - "type": "function", - "function": { - "name": "card_get_gacha_stats", - "description": "ガチャの統計情報を取得します", - "parameters": { - "type": "object", - "properties": {} - } - } - } - ] - tools.extend(card_tools) - - return tools - - async def generate_response( - self, - prompt: str, - persona_state: PersonaState, - memories: List[Memory], - system_prompt: Optional[str] = None - ) -> str: - """Generate response using OpenAI""" - - # Build context similar to Ollama - memory_context = "\n".join([ - f"[{mem.level.value}] {mem.content[:200]}..." 
- for mem in memories[:5] - ]) - - personality_desc = ", ".join([ - f"{trait}: {value:.1f}" - for trait, value in persona_state.base_personality.items() - ]) - - full_system_prompt = f"""You are an AI with unique personality traits and memories. -Current mood: {persona_state.current_mood} -Fortune today: {persona_state.fortune.fortune_value}/10 -Personality traits: {personality_desc} - -Recent memories: -{memory_context} - -{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}""" - - try: - response = self.client.chat.completions.create( - model=self.model, - messages=[ - {"role": "system", "content": full_system_prompt}, - {"role": "user", "content": prompt} - ], - temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05 # Vary by fortune - ) - return response.choices[0].message.content - except Exception as e: - self.logger.error(f"OpenAI generation failed: {e}") - return self._fallback_response(persona_state) - - async def chat_with_mcp(self, prompt: str, max_tokens: int = 2000, user_id: str = "user") -> str: - """Chat interface with MCP function calling support""" - if not self.mcp_client or not self.mcp_client.available: - return self.chat(prompt, max_tokens) - - try: - # Prepare tools - tools = self._get_mcp_tools() - - # Initial request with tools - response = self.client.chat.completions.create( - model=self.model, - messages=[ - {"role": "system", "content": self.config_system_prompt or "あなたは記憶システムと関係性データ、カードゲームシステムにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。カード関連の質問(「カード」「コレクション」「ガチャ」「見せて」「持っている」など)では、必ずcard_get_user_cardsやcard_analyze_collectionなどのツールを使用してください。didパラメータには現在会話しているユーザーのID(例:'syui')を使用してください。"}, - {"role": "user", "content": prompt} - ], - tools=tools, - tool_choice="auto", - max_tokens=max_tokens, - temperature=0.7 - ) - - message = response.choices[0].message - - # Handle tool calls - if message.tool_calls: - print(f"🔧 [OpenAI] {len(message.tool_calls)} tools called:") - for tc in message.tool_calls: - print(f" - {tc.function.name}({tc.function.arguments})") - - messages = [ - {"role": "system", "content": self.config_system_prompt or "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"}, - {"role": "user", "content": prompt}, - { - "role": "assistant", - "content": message.content, - "tool_calls": [tc.model_dump() for tc in message.tool_calls] - } - ] - - # Execute each tool call - for tool_call in message.tool_calls: - print(f"🌐 [MCP] Executing {tool_call.function.name}...") - tool_result = await self._execute_mcp_tool(tool_call, user_id) - print(f"✅ [MCP] Result: {str(tool_result)[:100]}...") - messages.append({ - "role": "tool", - "tool_call_id": tool_call.id, - "name": tool_call.function.name, - "content": json.dumps(tool_result, ensure_ascii=False) - }) - - # Get final response with tool outputs - final_response = self.client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=max_tokens, - temperature=0.7 - ) - - return final_response.choices[0].message.content - else: - return message.content - - except Exception as e: - self.logger.error(f"OpenAI MCP chat failed: {e}") - return f"申し訳ありません。エラーが発生しました: {e}" - - async def _execute_mcp_tool(self, tool_call, context_user_id: str = "user") -> Dict[str, Any]: - """Execute MCP tool call""" - try: - import json - function_name = tool_call.function.name - arguments = json.loads(tool_call.function.arguments) - - if 
function_name == "get_memories": - limit = arguments.get("limit", 5) - return await self.mcp_client.get_memories(limit) or {"error": "記憶の取得に失敗しました"} - - elif function_name == "search_memories": - keywords = arguments.get("keywords", []) - return await self.mcp_client.search_memories(keywords) or {"error": "記憶の検索に失敗しました"} - - elif function_name == "get_contextual_memories": - query = arguments.get("query", "") - limit = arguments.get("limit", 5) - return await self.mcp_client.get_contextual_memories(query, limit) or {"error": "文脈記憶の取得に失敗しました"} - - elif function_name == "get_relationship": - # 引数のuser_idがない場合はコンテキストから取得 - user_id = arguments.get("user_id", context_user_id) - if not user_id or user_id == "user": - user_id = context_user_id - # デバッグ用ログ - print(f"🔍 [DEBUG] get_relationship called with user_id: '{user_id}' (context: '{context_user_id}')") - result = await self.mcp_client.get_relationship(user_id) - print(f"🔍 [DEBUG] MCP result: {result}") - return result or {"error": "関係性の取得に失敗しました"} - - # ai.card tools - elif function_name == "card_get_user_cards": - did = arguments.get("did", context_user_id) - limit = arguments.get("limit", 10) - result = await self.mcp_client.card_get_user_cards(did, limit) - # Check if ai.card server is not running - if result and result.get("error") == "ai.card server is not running": - return { - "error": "ai.cardサーバーが起動していません", - "message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh" - } - return result or {"error": "カード一覧の取得に失敗しました"} - - elif function_name == "card_draw_card": - did = arguments.get("did", context_user_id) - is_paid = arguments.get("is_paid", False) - result = await self.mcp_client.card_draw_card(did, is_paid) - if result and result.get("error") == "ai.card server is not running": - return { - "error": "ai.cardサーバーが起動していません", - "message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh" - } - return result or {"error": "ガチャに失敗しました"} - - elif function_name == "card_analyze_collection": - did = arguments.get("did", context_user_id) - result = await self.mcp_client.card_analyze_collection(did) - if result and result.get("error") == "ai.card server is not running": - return { - "error": "ai.cardサーバーが起動していません", - "message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh" - } - return result or {"error": "コレクション分析に失敗しました"} - - elif function_name == "card_get_gacha_stats": - result = await self.mcp_client.card_get_gacha_stats() - if result and result.get("error") == "ai.card server is not running": - return { - "error": "ai.cardサーバーが起動していません", - "message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh" - } - return result or {"error": "ガチャ統計の取得に失敗しました"} - - else: - return {"error": f"未知のツール: {function_name}"} - - except Exception as e: - return {"error": f"ツール実行エラー: {str(e)}"} - - def chat(self, prompt: str, max_tokens: int = 2000) -> str: - """Simple chat interface without MCP tools""" - try: - messages = [] - if self.config_system_prompt: - messages.append({"role": "system", "content": self.config_system_prompt}) - messages.append({"role": "user", "content": prompt}) - - response = self.client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=max_tokens, - temperature=0.7 - ) - return response.choices[0].message.content - except Exception as e: - self.logger.error(f"OpenAI chat failed: {e}") - return "I'm having trouble connecting to the AI model." 
- - def _fallback_response(self, persona_state: PersonaState) -> str: - """Fallback response based on mood""" - mood_responses = { - "joyful": "What a delightful conversation!", - "cheerful": "That's interesting!", - "neutral": "I understand what you mean.", - "melancholic": "I've been thinking about that too...", - "contemplative": "That gives me something to ponder..." - } - return mood_responses.get(persona_state.current_mood, "I see.") - - -def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider: - """Factory function to create AI providers""" - if provider == "ollama": - # Get model from config if not provided - if model is None: - try: - from .config import Config - config = Config() - model = config.get('providers.ollama.default_model', 'qwen2.5') - except: - model = 'qwen2.5' # Fallback to default - - # Try to get host from config if not provided in kwargs - if 'host' not in kwargs: - try: - from .config import Config - config = Config() - config_host = config.get('providers.ollama.host') - if config_host: - kwargs['host'] = config_host - except: - pass # Use environment variable or default - return OllamaProvider(model=model, **kwargs) - elif provider == "openai": - # Get model from config if not provided - if model is None: - try: - from .config import Config - config = Config() - model = config.get('providers.openai.default_model', 'gpt-4o-mini') - except: - model = 'gpt-4o-mini' # Fallback to default - return OpenAIProvider(model=model, mcp_client=mcp_client, **kwargs) - else: - raise ValueError(f"Unknown provider: {provider}") diff --git a/python_backup/src/aigpt/chatgpt_importer.py b/python_backup/src/aigpt/chatgpt_importer.py deleted file mode 100644 index 08d3840..0000000 --- a/python_backup/src/aigpt/chatgpt_importer.py +++ /dev/null @@ -1,192 +0,0 @@ -"""ChatGPT conversation data importer for ai.gpt""" - -import json -import uuid -from datetime import datetime -from pathlib import Path -from typing import Dict, List, Any, Optional -import logging - -from .models import Memory, MemoryLevel, Conversation -from .memory import MemoryManager -from .relationship import RelationshipTracker - -logger = logging.getLogger(__name__) - - -class ChatGPTImporter: - """Import ChatGPT conversation data into ai.gpt memory system""" - - def __init__(self, data_dir: Path): - self.data_dir = data_dir - self.memory_manager = MemoryManager(data_dir) - self.relationship_tracker = RelationshipTracker(data_dir) - - def import_from_file(self, file_path: Path, user_id: str = "chatgpt_user") -> Dict[str, Any]: - """Import ChatGPT conversations from JSON file - - Args: - file_path: Path to ChatGPT export JSON file - user_id: User ID to associate with imported conversations - - Returns: - Dict with import statistics - """ - try: - with open(file_path, 'r', encoding='utf-8') as f: - chatgpt_data = json.load(f) - - return self._import_conversations(chatgpt_data, user_id) - - except Exception as e: - logger.error(f"Failed to import ChatGPT data: {e}") - raise - - def _import_conversations(self, chatgpt_data: List[Dict], user_id: str) -> Dict[str, Any]: - """Import multiple conversations from ChatGPT data""" - stats = { - "conversations_imported": 0, - "messages_imported": 0, - "user_messages": 0, - "assistant_messages": 0, - "skipped_messages": 0 - } - - for conversation_data in chatgpt_data: - try: - conv_stats = self._import_single_conversation(conversation_data, user_id) - - # Update overall stats - stats["conversations_imported"] += 1 - 
stats["messages_imported"] += conv_stats["messages"] - stats["user_messages"] += conv_stats["user_messages"] - stats["assistant_messages"] += conv_stats["assistant_messages"] - stats["skipped_messages"] += conv_stats["skipped"] - - except Exception as e: - logger.warning(f"Failed to import conversation '{conversation_data.get('title', 'Unknown')}': {e}") - continue - - logger.info(f"Import completed: {stats}") - return stats - - def _import_single_conversation(self, conversation_data: Dict, user_id: str) -> Dict[str, int]: - """Import a single conversation from ChatGPT""" - title = conversation_data.get("title", "Untitled") - create_time = conversation_data.get("create_time") - mapping = conversation_data.get("mapping", {}) - - stats = {"messages": 0, "user_messages": 0, "assistant_messages": 0, "skipped": 0} - - # Extract messages in chronological order - messages = self._extract_messages_from_mapping(mapping) - - for msg in messages: - try: - role = msg["author"]["role"] - content = self._extract_content(msg["content"]) - create_time_msg = msg.get("create_time") - - if not content or role not in ["user", "assistant"]: - stats["skipped"] += 1 - continue - - # Convert to ai.gpt format - if role == "user": - # User message - create memory entry - self._add_user_message(user_id, content, create_time_msg, title) - stats["user_messages"] += 1 - - elif role == "assistant": - # Assistant message - create AI response memory - self._add_assistant_message(user_id, content, create_time_msg, title) - stats["assistant_messages"] += 1 - - stats["messages"] += 1 - - except Exception as e: - logger.warning(f"Failed to process message in '{title}': {e}") - stats["skipped"] += 1 - continue - - logger.info(f"Imported conversation '{title}': {stats}") - return stats - - def _extract_messages_from_mapping(self, mapping: Dict) -> List[Dict]: - """Extract messages from ChatGPT mapping structure in chronological order""" - messages = [] - - for node_id, node_data in mapping.items(): - message = node_data.get("message") - if message and message.get("author", {}).get("role") in ["user", "assistant"]: - # Skip system messages and hidden messages - metadata = message.get("metadata", {}) - if not metadata.get("is_visually_hidden_from_conversation", False): - messages.append(message) - - # Sort by create_time if available - messages.sort(key=lambda x: x.get("create_time") or 0) - return messages - - def _extract_content(self, content_data: Dict) -> Optional[str]: - """Extract text content from ChatGPT content structure""" - if not content_data: - return None - - content_type = content_data.get("content_type") - - if content_type == "text": - parts = content_data.get("parts", []) - if parts and parts[0]: - return parts[0].strip() - - elif content_type == "user_editable_context": - # User context/instructions - user_instructions = content_data.get("user_instructions", "") - if user_instructions: - return f"[User Context] {user_instructions}" - - return None - - def _add_user_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str): - """Add user message to ai.gpt memory system""" - timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now() - - # Create conversation record - conversation = Conversation( - id=str(uuid.uuid4()), - user_id=user_id, - user_message=content, - ai_response="", # Will be filled by next assistant message - timestamp=timestamp, - context={"source": "chatgpt_import", "conversation_title": conversation_title} - ) - - # Add to memory 
with CORE level (imported data is important) - memory = Memory( - id=str(uuid.uuid4()), - timestamp=timestamp, - content=content, - level=MemoryLevel.CORE, - importance_score=0.8 # High importance for imported data - ) - - self.memory_manager.add_memory(memory) - - # Update relationship (positive interaction) - self.relationship_tracker.update_interaction(user_id, 1.0) - - def _add_assistant_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str): - """Add assistant message to ai.gpt memory system""" - timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now() - - # Add assistant response as memory (AI's own responses can inform future behavior) - memory = Memory( - id=str(uuid.uuid4()), - timestamp=timestamp, - content=f"[AI Response] {content}", - level=MemoryLevel.SUMMARY, - importance_score=0.6 # Medium importance for AI responses - ) - - self.memory_manager.add_memory(memory) \ No newline at end of file diff --git a/python_backup/src/aigpt/cli.py b/python_backup/src/aigpt/cli.py deleted file mode 100644 index 614199e..0000000 --- a/python_backup/src/aigpt/cli.py +++ /dev/null @@ -1,1596 +0,0 @@ -"""CLI interface for ai.gpt using typer""" - -import typer -from pathlib import Path -from typing import Optional, Dict, Any -from rich.console import Console -from rich.table import Table -from rich.panel import Panel -from datetime import datetime, timedelta -import subprocess -import shlex -import httpx -import asyncio -from prompt_toolkit import prompt as ptk_prompt -from prompt_toolkit.completion import WordCompleter, Completer, Completion -from prompt_toolkit.history import FileHistory -from prompt_toolkit.auto_suggest import AutoSuggestFromHistory - -from .persona import Persona -from .transmission import TransmissionController -from .mcp_server import AIGptMcpServer -from .ai_provider import create_ai_provider -from .scheduler import AIScheduler, TaskType -from .config import Config -from .project_manager import ContinuousDeveloper -from .commands.docs import docs_app -from .commands.submodules import submodules_app -from .commands.tokens import tokens_app - -app = typer.Typer(help="ai.gpt - Autonomous transmission AI with unique personality") -console = Console() - -# Configuration -config = Config() -DEFAULT_DATA_DIR = config.data_dir - - -class MCPClient: - """Client for communicating with MCP server using config settings""" - - def __init__(self, config: Optional[Config] = None): - self.config = config or Config() - self.enabled = self.config.get("mcp.enabled", True) - self.auto_detect = self.config.get("mcp.auto_detect", True) - self.servers = self.config.get("mcp.servers", {}) - self.available = False - self.has_card_tools = False - - if self.enabled: - self._check_availability() - - def _check_availability(self): - """Check if any MCP server is available""" - self.available = False - if not self.enabled: - print(f"🚨 [MCP Client] MCP disabled in config") - return - - print(f"🔍 [MCP Client] Checking availability...") - print(f"🔍 [MCP Client] Available servers: {list(self.servers.keys())}") - - # Check ai.gpt server first (primary) - ai_gpt_config = self.servers.get("ai_gpt", {}) - if ai_gpt_config: - base_url = ai_gpt_config.get("base_url", "http://localhost:8001") - timeout = ai_gpt_config.get("timeout", 5.0) - - # Convert timeout to float if it's a string - if isinstance(timeout, str): - timeout = float(timeout) - - print(f"🔍 [MCP Client] Testing ai_gpt server: {base_url} (timeout: {timeout})") - try: - import 
httpx - with httpx.Client(timeout=timeout) as client: - response = client.get(f"{base_url}/docs") - print(f"🔍 [MCP Client] ai_gpt response: {response.status_code}") - if response.status_code == 200: - self.available = True - self.active_server = "ai_gpt" - print(f"✅ [MCP Client] ai_gpt server connected successfully") - - # Check if card tools are available - try: - card_status = client.get(f"{base_url}/card_system_status") - if card_status.status_code == 200: - self.has_card_tools = True - print(f"✅ [MCP Client] ai.card tools detected and available") - except: - print(f"🔍 [MCP Client] ai.card tools not available") - - return - except Exception as e: - print(f"🚨 [MCP Client] ai_gpt connection failed: {e}") - else: - print(f"🚨 [MCP Client] No ai_gpt config found") - - # If auto_detect is enabled, try to find any available server - if self.auto_detect: - print(f"🔍 [MCP Client] Auto-detect enabled, trying other servers...") - for server_name, server_config in self.servers.items(): - base_url = server_config.get("base_url", "") - timeout = server_config.get("timeout", 5.0) - - # Convert timeout to float if it's a string - if isinstance(timeout, str): - timeout = float(timeout) - - print(f"🔍 [MCP Client] Testing {server_name}: {base_url} (timeout: {timeout})") - try: - import httpx - with httpx.Client(timeout=timeout) as client: - response = client.get(f"{base_url}/docs") - print(f"🔍 [MCP Client] {server_name} response: {response.status_code}") - if response.status_code == 200: - self.available = True - self.active_server = server_name - print(f"✅ [MCP Client] {server_name} server connected successfully") - return - except Exception as e: - print(f"🚨 [MCP Client] {server_name} connection failed: {e}") - - print(f"🚨 [MCP Client] No MCP servers available") - - def _get_url(self, endpoint_name: str) -> Optional[str]: - """Get full URL for an endpoint""" - if not self.available or not hasattr(self, 'active_server'): - print(f"🚨 [MCP Client] Not available or no active server") - return None - - server_config = self.servers.get(self.active_server, {}) - base_url = server_config.get("base_url", "") - endpoints = server_config.get("endpoints", {}) - endpoint_path = endpoints.get(endpoint_name, "") - - print(f"🔍 [MCP Client] Server: {self.active_server}") - print(f"🔍 [MCP Client] Base URL: {base_url}") - print(f"🔍 [MCP Client] Endpoints: {list(endpoints.keys())}") - print(f"🔍 [MCP Client] Looking for: {endpoint_name}") - print(f"🔍 [MCP Client] Found path: {endpoint_path}") - - if base_url and endpoint_path: - return f"{base_url}{endpoint_path}" - return None - - def _get_timeout(self) -> float: - """Get timeout for the active server""" - if not hasattr(self, 'active_server'): - return 5.0 - server_config = self.servers.get(self.active_server, {}) - timeout = server_config.get("timeout", 5.0) - - # Convert timeout to float if it's a string - if isinstance(timeout, str): - timeout = float(timeout) - - return timeout - - async def get_memories(self, limit: int = 5) -> Optional[Dict[str, Any]]: - """Get memories via MCP""" - url = self._get_url("get_memories") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(f"{url}?limit={limit}") - return response.json() if response.status_code == 200 else None - except Exception: - return None - - async def search_memories(self, keywords: list) -> Optional[Dict[str, Any]]: - """Search memories via MCP""" - url = self._get_url("search_memories") - if not url: - return None - try: - async 
with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.post(url, json={"keywords": keywords}) - return response.json() if response.status_code == 200 else None - except Exception: - return None - - async def get_contextual_memories(self, query: str, limit: int = 5) -> Optional[Dict[str, Any]]: - """Get contextual memories via MCP""" - url = self._get_url("get_contextual_memories") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(f"{url}?query={query}&limit={limit}") - return response.json() if response.status_code == 200 else None - except Exception: - return None - - async def process_interaction(self, user_id: str, message: str) -> Optional[Dict[str, Any]]: - """Process interaction via MCP""" - url = self._get_url("process_interaction") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.post(url, json={"user_id": user_id, "message": message}) - return response.json() if response.status_code == 200 else None - except Exception: - return None - - async def get_relationship(self, user_id: str) -> Optional[Dict[str, Any]]: - """Get relationship via MCP""" - url = self._get_url("get_relationship") - print(f"🔍 [MCP Client] get_relationship URL: {url}") - if not url: - print(f"🚨 [MCP Client] No URL found for get_relationship") - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(f"{url}?user_id={user_id}") - print(f"🔍 [MCP Client] Response status: {response.status_code}") - if response.status_code == 200: - result = response.json() - print(f"🔍 [MCP Client] Response data: {result}") - return result - else: - print(f"🚨 [MCP Client] HTTP error: {response.status_code}") - return None - except Exception as e: - print(f"🚨 [MCP Client] Exception: {e}") - return None - - def get_server_info(self) -> Dict[str, Any]: - """Get information about the active MCP server""" - if not self.available or not hasattr(self, 'active_server'): - return {"available": False} - - server_config = self.servers.get(self.active_server, {}) - return { - "available": True, - "server_name": self.active_server, - "display_name": server_config.get("name", self.active_server), - "base_url": server_config.get("base_url", ""), - "timeout": server_config.get("timeout", 5.0), - "endpoints": len(server_config.get("endpoints", {})), - "has_card_tools": self.has_card_tools - } - - # ai.card MCP methods - async def card_get_user_cards(self, did: str, limit: int = 10) -> Optional[Dict[str, Any]]: - """Get user's card collection via MCP""" - if not self.has_card_tools: - return {"error": "ai.card tools not available"} - - url = self._get_url("card_get_user_cards") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(f"{url}?did={did}&limit={limit}") - return response.json() if response.status_code == 200 else None - except Exception as e: - return {"error": f"Failed to get cards: {str(e)}"} - - async def card_draw_card(self, did: str, is_paid: bool = False) -> Optional[Dict[str, Any]]: - """Draw a card from gacha system via MCP""" - if not self.has_card_tools: - return {"error": "ai.card tools not available"} - - url = self._get_url("card_draw_card") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.post(url, 
json={"did": did, "is_paid": is_paid}) - return response.json() if response.status_code == 200 else None - except Exception as e: - return {"error": f"Failed to draw card: {str(e)}"} - - async def card_analyze_collection(self, did: str) -> Optional[Dict[str, Any]]: - """Analyze card collection via MCP""" - if not self.has_card_tools: - return {"error": "ai.card tools not available"} - - url = self._get_url("card_analyze_collection") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(f"{url}?did={did}") - return response.json() if response.status_code == 200 else None - except Exception as e: - return {"error": f"Failed to analyze collection: {str(e)}"} - - async def card_get_gacha_stats(self) -> Optional[Dict[str, Any]]: - """Get gacha statistics via MCP""" - if not self.has_card_tools: - return {"error": "ai.card tools not available"} - - url = self._get_url("card_get_gacha_stats") - if not url: - return None - try: - async with httpx.AsyncClient(timeout=self._get_timeout()) as client: - response = await client.get(url) - return response.json() if response.status_code == 200 else None - except Exception as e: - return {"error": f"Failed to get gacha stats: {str(e)}"} - - -def get_persona(data_dir: Optional[Path] = None) -> Persona: - """Get or create persona instance""" - if data_dir is None: - data_dir = DEFAULT_DATA_DIR - - data_dir.mkdir(parents=True, exist_ok=True) - return Persona(data_dir) - - -@app.command() -def chat( - user_id: str = typer.Argument(..., help="User ID (atproto DID)"), - message: str = typer.Argument(..., help="Message to send to AI"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"), - provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)") -): - """Chat with the AI""" - persona = get_persona(data_dir) - - # Get config instance - config_instance = Config() - - # Get defaults from config if not provided - if not provider: - provider = config_instance.get("default_provider", "ollama") - if not model: - if provider == "ollama": - model = config_instance.get("providers.ollama.default_model", "qwen2.5") - else: - model = config_instance.get("providers.openai.default_model", "gpt-4o-mini") - - # Create AI provider with MCP client if needed - ai_provider = None - mcp_client = None - - try: - # Create MCP client for OpenAI provider - if provider == "openai": - mcp_client = MCPClient(config_instance) - if mcp_client.available: - console.print(f"[dim]MCP client connected to {mcp_client.active_server}[/dim]") - - ai_provider = create_ai_provider(provider=provider, model=model, mcp_client=mcp_client) - console.print(f"[dim]Using {provider} with model {model}[/dim]\n") - except Exception as e: - console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]") - console.print("[yellow]Falling back to simple responses[/yellow]\n") - - # Process interaction - response, relationship_delta = persona.process_interaction(user_id, message, ai_provider) - - # Get updated relationship - relationship = persona.relationships.get_or_create_relationship(user_id) - - # Display response - console.print(Panel(response, title="AI Response", border_style="cyan", expand=True, width=None)) - - # Show relationship status - status_color = "green" if relationship.transmission_enabled else "yellow" - if relationship.is_broken: - status_color 
= "red" - - console.print(f"\n[{status_color}]Relationship Status:[/{status_color}] {relationship.status.value}") - console.print(f"Score: {relationship.score:.2f} / {relationship.threshold}") - console.print(f"Transmission: {'✓ Enabled' if relationship.transmission_enabled else '✗ Disabled'}") - - if relationship.is_broken: - console.print("[red]⚠️ This relationship is broken and cannot be repaired.[/red]") - - -@app.command() -def status( - user_id: Optional[str] = typer.Argument(None, help="User ID to check status for"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory") -): - """Check AI status and relationships""" - persona = get_persona(data_dir) - state = persona.get_current_state() - - # Show AI state - console.print(Panel(f"[cyan]ai.gpt Status[/cyan]", expand=False)) - console.print(f"Mood: {state.current_mood}") - console.print(f"Fortune: {state.fortune.fortune_value}/10") - - if state.fortune.breakthrough_triggered: - console.print("[yellow]⚡ Breakthrough triggered![/yellow]") - - # Show personality traits - table = Table(title="Current Personality") - table.add_column("Trait", style="cyan") - table.add_column("Value", style="magenta") - - for trait, value in state.base_personality.items(): - table.add_row(trait.capitalize(), f"{value:.2f}") - - console.print(table) - - # Show specific relationship if requested - if user_id: - rel = persona.relationships.get_or_create_relationship(user_id) - console.print(f"\n[cyan]Relationship with {user_id}:[/cyan]") - console.print(f"Status: {rel.status.value}") - console.print(f"Score: {rel.score:.2f}") - console.print(f"Total Interactions: {rel.total_interactions}") - console.print(f"Transmission Enabled: {rel.transmission_enabled}") - - -@app.command() -def fortune( - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory") -): - """Check today's AI fortune""" - persona = get_persona(data_dir) - fortune = persona.fortune_system.get_today_fortune() - - # Fortune display - fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value) - - console.print(Panel( - f"{fortune_bar}\n\n" - f"Today's Fortune: {fortune.fortune_value}/10\n" - f"Date: {fortune.date}", - title="AI Fortune", - border_style="yellow" - )) - - if fortune.consecutive_good > 0: - console.print(f"[green]Consecutive good days: {fortune.consecutive_good}[/green]") - if fortune.consecutive_bad > 0: - console.print(f"[red]Consecutive bad days: {fortune.consecutive_bad}[/red]") - - if fortune.breakthrough_triggered: - console.print("\n[yellow]⚡ BREAKTHROUGH! 
Special fortune activated![/yellow]") - - -@app.command() -def transmit( - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - dry_run: bool = typer.Option(True, "--dry-run/--execute", help="Dry run or execute") -): - """Check and execute autonomous transmissions""" - persona = get_persona(data_dir) - controller = TransmissionController(persona, persona.data_dir) - - eligible = controller.check_transmission_eligibility() - - if not eligible: - console.print("[yellow]No users eligible for transmission.[/yellow]") - return - - console.print(f"[green]Found {len(eligible)} eligible users for transmission:[/green]") - - for user_id, rel in eligible.items(): - message = controller.generate_transmission_message(user_id) - if message: - console.print(f"\n[cyan]To:[/cyan] {user_id}") - console.print(f"[cyan]Message:[/cyan] {message}") - console.print(f"[cyan]Relationship:[/cyan] {rel.status.value} (score: {rel.score:.2f})") - - if not dry_run: - # In real implementation, send via atproto or other channel - controller.record_transmission(user_id, message, success=True) - console.print("[green]✓ Transmitted[/green]") - else: - console.print("[yellow]→ Would transmit (dry run)[/yellow]") - - -@app.command() -def maintenance( - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory") -): - """Run daily maintenance tasks""" - persona = get_persona(data_dir) - - console.print("[cyan]Running daily maintenance...[/cyan]") - persona.daily_maintenance() - console.print("[green]✓ Maintenance completed[/green]") - - -@app.command() -def relationships( - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory") -): - """List all relationships""" - persona = get_persona(data_dir) - - table = Table(title="All Relationships") - table.add_column("User ID", style="cyan") - table.add_column("Status", style="magenta") - table.add_column("Score", style="green") - table.add_column("Transmission", style="yellow") - table.add_column("Last Interaction") - - for user_id, rel in persona.relationships.relationships.items(): - transmission = "✓" if rel.transmission_enabled else "✗" - if rel.is_broken: - transmission = "💔" - - last_interaction = rel.last_interaction.strftime("%Y-%m-%d") if rel.last_interaction else "Never" - - table.add_row( - user_id[:16] + "...", - rel.status.value, - f"{rel.score:.2f}", - transmission, - last_interaction - ) - - console.print(table) - - -@app.command() -def server( - host: str = typer.Option("localhost", "--host", "-h", help="Server host"), - port: int = typer.Option(8001, "--port", "-p", help="Server port"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"), - provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)") -): - """Run MCP server for AI integration""" - import uvicorn - - if data_dir is None: - data_dir = DEFAULT_DATA_DIR - - data_dir.mkdir(parents=True, exist_ok=True) - - # Get configuration - config_instance = Config() - - # Get defaults from config if not provided - if not provider: - provider = config_instance.get("default_provider", "ollama") - if not model: - if provider == "ollama": - model = config_instance.get("providers.ollama.default_model", "qwen3:latest") - elif provider == "openai": - model = config_instance.get("providers.openai.default_model", "gpt-4o-mini") - else: - model = "qwen3:latest" - - # 
Create MCP server - mcp_server = AIGptMcpServer(data_dir) - app_instance = mcp_server.app - - # Get endpoint categories and count - total_routes = len(mcp_server.app.routes) - mcp_tools = total_routes - 2 # Exclude docs and openapi - - # Categorize endpoints - memory_endpoints = ["get_memories", "search_memories", "get_contextual_memories", "create_summary", "create_core_memory"] - relationship_endpoints = ["get_relationship", "get_all_relationships", "process_interaction", "check_transmission_eligibility"] - system_endpoints = ["get_persona_state", "get_fortune", "run_maintenance"] - shell_endpoints = ["execute_command", "analyze_file", "write_file", "list_files", "read_project_file"] - remote_endpoints = ["remote_shell", "ai_bot_status", "isolated_python", "isolated_analysis"] - card_endpoints = ["card_get_user_cards", "card_draw_card", "card_get_card_details", "card_analyze_collection", "card_get_gacha_stats", "card_system_status"] - - # Check if ai.card tools are available - has_card_tools = mcp_server.has_card - - # Build endpoint summary - endpoint_summary = f"""🧠 Memory System: {len(memory_endpoints)} tools -🤝 Relationships: {len(relationship_endpoints)} tools -⚙️ System State: {len(system_endpoints)} tools -💻 Shell Integration: {len(shell_endpoints)} tools -🔒 Remote Execution: {len(remote_endpoints)} tools""" - - if has_card_tools: - endpoint_summary += f"\n🎴 Card Game System: {len(card_endpoints)} tools" - - # Check MCP client connectivity - mcp_client = MCPClient(config_instance) - mcp_status = "✅ MCP Client Ready" if mcp_client.available else "⚠️ MCP Client Disabled" - - # Add ai.card status if available - card_status = "" - if has_card_tools: - card_status = "\n🎴 ai.card: ./card directory detected" - - # Provider configuration check - provider_status = "✅ Ready" - if provider == "openai": - api_key = config_instance.get_api_key("openai") - if not api_key: - provider_status = "⚠️ No API Key" - elif provider == "ollama": - ollama_host = config_instance.get("providers.ollama.host", "http://localhost:11434") - provider_status = f"✅ {ollama_host}" - - console.print(Panel( - f"[bold cyan]🚀 ai.gpt MCP Server[/bold cyan]\n\n" - f"[green]Server Configuration:[/green]\n" - f"🌐 Address: http://{host}:{port}\n" - f"📋 API Docs: http://{host}:{port}/docs\n" - f"💾 Data Directory: {data_dir}\n\n" - f"[green]AI Provider Configuration:[/green]\n" - f"🤖 Provider: {provider} {provider_status}\n" - f"🧩 Model: {model}\n\n" - f"[green]MCP Tools Available ({mcp_tools} total):[/green]\n" - f"{endpoint_summary}\n\n" - f"[green]Integration Status:[/green]\n" - f"{mcp_status}\n" - f"🔗 Config: {config_instance.config_file}{card_status}\n\n" - f"[dim]Press Ctrl+C to stop server[/dim]", - title="🔧 MCP Server Startup", - border_style="green", - expand=True - )) - - # Store provider info in app state for later use - app_instance.state.ai_provider = provider - app_instance.state.ai_model = model - app_instance.state.config = config_instance - - # Run server with better logging - try: - uvicorn.run( - app_instance, - host=host, - port=port, - log_level="info", - access_log=False # Reduce noise - ) - except KeyboardInterrupt: - console.print("\n[yellow]🛑 MCP Server stopped[/yellow]") - except Exception as e: - console.print(f"\n[red]❌ Server error: {e}[/red]") - - -@app.command() -def schedule( - action: str = typer.Argument(..., help="Action: add, list, enable, disable, remove, run"), - task_type: Optional[str] = typer.Argument(None, help="Task type for add action"), - schedule_expr: Optional[str] = 
typer.Argument(None, help="Schedule expression (cron or interval)"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - task_id: Optional[str] = typer.Option(None, "--task-id", "-t", help="Task ID"), - provider: Optional[str] = typer.Option(None, "--provider", help="AI provider for transmission"), - model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model for transmission") -): - """Manage scheduled tasks""" - persona = get_persona(data_dir) - scheduler = AIScheduler(persona.data_dir, persona) - - if action == "add": - if not task_type or not schedule_expr: - console.print("[red]Error: task_type and schedule required for add action[/red]") - return - - # Parse task type - try: - task_type_enum = TaskType(task_type) - except ValueError: - console.print(f"[red]Invalid task type. Valid types: {', '.join([t.value for t in TaskType])}[/red]") - return - - # Metadata for transmission tasks - metadata = {} - if task_type_enum == TaskType.TRANSMISSION_CHECK: - metadata["provider"] = provider or "ollama" - metadata["model"] = model or "qwen2.5" - - try: - task = scheduler.add_task(task_type_enum, schedule_expr, task_id, metadata) - console.print(f"[green]✓ Added task {task.task_id}[/green]") - console.print(f"Type: {task.task_type.value}") - console.print(f"Schedule: {task.schedule}") - except ValueError as e: - console.print(f"[red]Error: {e}[/red]") - - elif action == "list": - tasks = scheduler.get_tasks() - if not tasks: - console.print("[yellow]No scheduled tasks[/yellow]") - return - - table = Table(title="Scheduled Tasks") - table.add_column("Task ID", style="cyan") - table.add_column("Type", style="magenta") - table.add_column("Schedule", style="green") - table.add_column("Enabled", style="yellow") - table.add_column("Last Run") - - for task in tasks: - enabled = "✓" if task.enabled else "✗" - last_run = task.last_run.strftime("%Y-%m-%d %H:%M") if task.last_run else "Never" - - table.add_row( - task.task_id[:20] + "..." 
if len(task.task_id) > 20 else task.task_id, - task.task_type.value, - task.schedule, - enabled, - last_run - ) - - console.print(table) - - elif action == "enable": - if not task_id: - console.print("[red]Error: --task-id required for enable action[/red]") - return - - scheduler.enable_task(task_id) - console.print(f"[green]✓ Enabled task {task_id}[/green]") - - elif action == "disable": - if not task_id: - console.print("[red]Error: --task-id required for disable action[/red]") - return - - scheduler.disable_task(task_id) - console.print(f"[yellow]✓ Disabled task {task_id}[/yellow]") - - elif action == "remove": - if not task_id: - console.print("[red]Error: --task-id required for remove action[/red]") - return - - scheduler.remove_task(task_id) - console.print(f"[red]✓ Removed task {task_id}[/red]") - - elif action == "run": - console.print("[cyan]Starting scheduler daemon...[/cyan]") - console.print("Press Ctrl+C to stop\n") - - import asyncio - - async def run_scheduler(): - scheduler.start() - try: - while True: - await asyncio.sleep(1) - except KeyboardInterrupt: - scheduler.stop() - - try: - asyncio.run(run_scheduler()) - except KeyboardInterrupt: - console.print("\n[yellow]Scheduler stopped[/yellow]") - - else: - console.print(f"[red]Unknown action: {action}[/red]") - console.print("Valid actions: add, list, enable, disable, remove, run") - - -@app.command() -def shell( - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"), - provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)") -): - """Interactive shell mode (ai.shell)""" - persona = get_persona(data_dir) - - # Get defaults from config if not provided - config_instance = Config() - if not provider: - provider = config_instance.get("default_provider", "ollama") - if not model: - if provider == "ollama": - model = config_instance.get("providers.ollama.default_model", "qwen3:latest") - elif provider == "openai": - model = config_instance.get("providers.openai.default_model", "gpt-4o-mini") - else: - model = "qwen3:latest" # fallback - - # Create AI provider - ai_provider = None - try: - ai_provider = create_ai_provider(provider=provider, model=model) - console.print(f"[dim]Using {provider} with model {model}[/dim]\n") - except Exception as e: - console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]") - console.print("[yellow]Falling back to simple responses[/yellow]\n") - - # Welcome message - console.print(Panel( - "[cyan]Welcome to ai.shell[/cyan]\n\n" - "Interactive AI-powered shell with command execution\n\n" - "Commands:\n" - " help - Show available commands\n" - " exit/quit - Exit shell\n" - " ! 
- Execute shell command\n" - " chat - Chat with AI\n" - " status - Show AI status\n" - " clear - Clear screen\n\n" - "Type any message to interact with AI", - title="ai.shell", - border_style="green" - )) - - # Custom completer for ai.shell - class ShellCompleter(Completer): - def __init__(self): - # Slash commands (built-in) - self.slash_commands = [ - '/help', '/exit', '/quit', '/status', '/clear', '/load', - '/fortune', '/relationships' - ] - - # AI commands - self.ai_commands = [ - '/analyze', '/generate', '/explain', '/optimize', - '/refactor', '/test', '/document' - ] - - # Project commands - self.project_commands = [ - '/project-status', '/suggest-next', '/continuous' - ] - - # Remote commands - self.remote_commands = [ - '/remote', '/isolated', '/aibot-status' - ] - - # Shell commands (with ! prefix) - self.shell_commands = [ - '!ls', '!cd', '!pwd', '!cat', '!echo', '!grep', '!find', - '!mkdir', '!rm', '!cp', '!mv', '!git', '!python', '!pip', - '!npm', '!node', '!cargo', '!rustc', '!docker', '!kubectl' - ] - - # All commands combined - self.all_commands = (self.slash_commands + self.ai_commands + - self.project_commands + self.remote_commands + - self.shell_commands) - - def get_completions(self, document, complete_event): - text = document.text_before_cursor - - # For slash commands - if text.startswith('/'): - for cmd in self.all_commands: - if cmd.startswith('/') and cmd.startswith(text): - yield Completion(cmd, start_position=-len(text)) - - # For shell commands (!) - elif text.startswith('!'): - for cmd in self.shell_commands: - if cmd.startswith(text): - yield Completion(cmd, start_position=-len(text)) - - # For regular text (AI chat) - else: - # Common AI prompts - ai_prompts = [ - 'analyze this file', 'generate code for', 'explain how to', - 'optimize this', 'refactor the', 'create tests for', - 'document this code', 'help me with' - ] - for prompt in ai_prompts: - if prompt.startswith(text.lower()): - yield Completion(prompt, start_position=-len(text)) - - completer = ShellCompleter() - - # History file - actual_data_dir = data_dir if data_dir else DEFAULT_DATA_DIR - history_file = actual_data_dir / "shell_history.txt" - history = FileHistory(str(history_file)) - - # Main shell loop - current_user = "shell_user" # Default user for shell sessions - - while True: - try: - # Get input with completion - user_input = ptk_prompt( - "ai.shell> ", - completer=completer, - history=history, - auto_suggest=AutoSuggestFromHistory() - ).strip() - - if not user_input: - continue - - # Exit commands - if user_input.lower() in ['exit', 'quit', '/exit', '/quit']: - console.print("[cyan]Goodbye![/cyan]") - break - - # Help command - elif user_input.lower() in ['help', '/help', '/']: - console.print(Panel( - "[cyan]ai.shell Commands:[/cyan]\n\n" - " /help, / - Show this help message\n" - " /exit, /quit - Exit the shell\n" - " ! 
<command> - Execute a shell command (!ls, !git status)\n"
-                    " /status - Show AI status\n"
-                    " /fortune - Check AI fortune\n"
-                    " /relationships - List all relationships\n"
-                    " /clear - Clear the screen\n"
-                    " /load - Load aishell.md project file\n\n"
-                    "[cyan]AI Commands:[/cyan]\n"
-                    " /analyze <file> - Analyze a file with AI\n"
-                    " /generate <description> - Generate code from description\n"
-                    " /explain <topic> - Get AI explanation\n\n"
-                    "[cyan]Remote Commands (ai.bot):[/cyan]\n"
-                    " /remote <command> - Execute command in isolated container\n"
-                    " /isolated <code> - Run Python code in isolated environment\n"
-                    " /aibot-status - Check ai.bot server status\n\n"
-                    "[cyan]Project Commands (Claude Code-like):[/cyan]\n"
-                    " /project-status - Analyze current project structure\n"
-                    " /suggest-next - AI suggests next development steps\n"
-                    " /continuous - Enable continuous development mode\n\n"
-                    "[cyan]Tab Completion:[/cyan]\n"
-                    " /[Tab] - Show all slash commands\n"
-                    " ![Tab] - Show all shell commands\n"
-                    " [Tab] - AI prompt suggestions\n\n"
-                    "Type any message to chat with AI",
-                    title="Help",
-                    border_style="yellow"
-                ))
-
-            # Clear command
-            elif user_input.lower() in ['clear', '/clear']:
-                console.clear()
-
-            # Shell command execution
-            elif user_input.startswith('!'):
-                cmd = user_input[1:].strip()
-                if cmd:
-                    try:
-                        # Execute command
-                        result = subprocess.run(
-                            shlex.split(cmd),
-                            capture_output=True,
-                            text=True,
-                            shell=False
-                        )
-
-                        if result.stdout:
-                            console.print(result.stdout.rstrip())
-                        if result.stderr:
-                            console.print(f"[red]{result.stderr.rstrip()}[/red]")
-
-                        if result.returncode != 0:
-                            console.print(f"[red]Command exited with code {result.returncode}[/red]")
-                    except FileNotFoundError:
-                        console.print(f"[red]Command not found: {cmd.split()[0]}[/red]")
-                    except Exception as e:
-                        console.print(f"[red]Error executing command: {e}[/red]")
-
-            # Status command
-            elif user_input.lower() in ['status', '/status']:
-                state = persona.get_current_state()
-                console.print(f"\nMood: {state.current_mood}")
-                console.print(f"Fortune: {state.fortune.fortune_value}/10")
-
-                rel = persona.relationships.get_or_create_relationship(current_user)
-                console.print(f"\nRelationship Status: {rel.status.value}")
-                console.print(f"Score: {rel.score:.2f} / {rel.threshold}")
-
-            # Fortune command
-            elif user_input.lower() in ['fortune', '/fortune']:
-                fortune = persona.fortune_system.get_today_fortune()
-                fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value)
-                console.print(f"\n{fortune_bar}")
-                console.print(f"Today's Fortune: {fortune.fortune_value}/10")
-
-            # Relationships command
-            elif user_input.lower() in ['relationships', '/relationships']:
-                if persona.relationships.relationships:
-                    console.print("\n[cyan]Relationships:[/cyan]")
-                    for user_id, rel in persona.relationships.relationships.items():
-                        console.print(f"  {user_id[:16]}... - {rel.status.value} ({rel.score:.2f})")
-                else:
-                    console.print("[yellow]No relationships yet[/yellow]")
-
-            # Load aishell.md command
-            elif user_input.lower() in ['load', '/load', 'load aishell.md', 'project']:
-                # Try to find and load aishell.md
-                search_paths = [
-                    Path.cwd() / "aishell.md",
-                    Path.cwd() / "docs" / "aishell.md",
-                    actual_data_dir.parent / "aishell.md",
-                    Path.cwd() / "claude.md",  # Also check for claude.md
-                ]
-
-                loaded = False
-                for path in search_paths:
-                    if path.exists():
-                        console.print(f"[cyan]Loading project file: {path}[/cyan]")
-                        with open(path, 'r', encoding='utf-8') as f:
-                            content = f.read()
-
-                        # Process with AI to understand project
-                        load_prompt = f"I've loaded the project specification. Please analyze it and understand the project goals:\n\n{content[:3000]}"
-                        response, _ = persona.process_interaction(current_user, load_prompt, ai_provider)
-                        console.print("\n[green]Project loaded successfully![/green]")
-                        console.print(f"[cyan]AI Understanding:[/cyan]\n{response}")
-                        loaded = True
-                        break
-
-                if not loaded:
-                    console.print("[yellow]No aishell.md or claude.md found in project.[/yellow]")
-                    console.print("Create aishell.md to define project goals and AI instructions.")
-
-            # AI-powered commands
-            elif user_input.lower().startswith(('analyze ', '/analyze ')):
-                # Analyze file or code with project context
-                target = user_input.split(' ', 1)[1].strip() if ' ' in user_input else ''
-                if target and os.path.exists(target):
-                    console.print(f"[cyan]Analyzing {target} with project context...[/cyan]")
-                    try:
-                        developer = ContinuousDeveloper(Path.cwd(), ai_provider)
-                        analysis = developer.analyze_file(target)
-                        console.print(f"\n[cyan]Analysis:[/cyan]\n{analysis}")
-                    except Exception as e:
-                        # Fallback to simple analysis
-                        with open(target, 'r') as f:
-                            content = f.read()
-                        analysis_prompt = f"Analyze this file and provide insights:\n\n{content[:2000]}"
-                        response, _ = persona.process_interaction(current_user, analysis_prompt, ai_provider)
-                        console.print(f"\n[cyan]Analysis:[/cyan]\n{response}")
-                else:
-                    console.print("[red]Usage: /analyze <file>[/red]")
-
-            elif user_input.lower().startswith(('generate ', '/generate ')):
-                # Generate code with project context
-                gen_prompt = user_input.split(' ', 1)[1].strip() if ' ' in user_input else ''
-                if gen_prompt:
-                    console.print("[cyan]Generating code with project context...[/cyan]")
-                    try:
-                        developer = ContinuousDeveloper(Path.cwd(), ai_provider)
-                        generated_code = developer.generate_code(gen_prompt)
-                        console.print(f"\n[cyan]Generated Code:[/cyan]\n{generated_code}")
-                    except Exception as e:
-                        # Fallback to simple generation
-                        full_prompt = f"Generate code for: {gen_prompt}. Provide clean, well-commented code."
-                        response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
-                        console.print(f"\n[cyan]Generated Code:[/cyan]\n{response}")
-                else:
-                    console.print("[red]Usage: /generate <description>[/red]")
-
-            elif user_input.lower().startswith(('explain ', '/explain ')):
-                # Explain code or concept
-                topic = user_input[8:].strip()
-                if topic:
-                    console.print(f"[cyan]Explaining {topic}...[/cyan]")
-                    full_prompt = f"Explain this in detail: {topic}"
-                    response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
-                    console.print(f"\n[cyan]Explanation:[/cyan]\n{response}")
-
-            # Remote execution commands (ai.bot integration)
-            elif user_input.lower().startswith('remote '):
-                # Execute command in ai.bot isolated container
-                command = user_input[7:].strip()
-                if command:
-                    console.print(f"[cyan]Executing remotely:[/cyan] {command}")
-                    try:
-                        import httpx
-                        import asyncio
-
-                        async def execute_remote():
-                            async with httpx.AsyncClient(timeout=30.0) as client:
-                                response = await client.post(
-                                    "http://localhost:8080/sh",
-                                    json={"command": command},
-                                    headers={"Content-Type": "application/json"}
-                                )
-                                return response
-
-                        response = asyncio.run(execute_remote())
-
-                        if response.status_code == 200:
-                            result = response.json()
-                            console.print(f"[green]Output:[/green]\n{result.get('output', '')}")
-                            if result.get('error'):
-                                console.print(f"[red]Error:[/red] {result.get('error')}")
-                            console.print(f"[dim]Exit code: {result.get('exit_code', 0)} | Execution time: {result.get('execution_time', 'N/A')}[/dim]")
-                        else:
-                            console.print(f"[red]ai.bot error: HTTP {response.status_code}[/red]")
-                    except Exception as e:
-                        console.print(f"[red]Failed to connect to ai.bot: {e}[/red]")
-
-            elif user_input.lower().startswith('isolated '):
-                # Execute Python code in isolated environment
-                code = user_input[9:].strip()
-                if code:
-                    console.print("[cyan]Running Python code in isolated container...[/cyan]")
-                    try:
-                        import httpx
-                        import asyncio
-
-                        async def execute_python():
-                            # Escape quotes in a separate step: f-string expressions
-                            # cannot contain backslashes before Python 3.12.
-                            escaped_code = code.replace('"', '\\"')
-                            python_command = f'python3 -c "{escaped_code}"'
-                            async with httpx.AsyncClient(timeout=30.0) as client:
-                                response = await client.post(
-                                    "http://localhost:8080/sh",
-                                    json={"command": python_command},
-                                    headers={"Content-Type": "application/json"}
-                                )
-                                return response
-
-                        response = asyncio.run(execute_python())
-
-                        if response.status_code == 200:
-                            result = response.json()
-                            console.print(f"[green]Python Output:[/green]\n{result.get('output', '')}")
-                            if result.get('error'):
-                                console.print(f"[red]Error:[/red] {result.get('error')}")
-                        else:
-                            console.print(f"[red]ai.bot error: HTTP {response.status_code}[/red]")
-                    except Exception as e:
-                        console.print(f"[red]Failed to execute Python code: {e}[/red]")
-
-            elif user_input.lower() == 'aibot-status':
-                # Check ai.bot server status
-                console.print("[cyan]Checking ai.bot server status...[/cyan]")
-                try:
-                    import httpx
-                    import asyncio
-
-                    async def check_status():
-                        async with httpx.AsyncClient(timeout=10.0) as client:
-                            response = await client.get("http://localhost:8080/status")
-                            return response
-
-                    response = asyncio.run(check_status())
-
-                    if response.status_code == 200:
-                        result = response.json()
-                        console.print("[green]ai.bot is online![/green]")
-                        console.print(f"Server info: {result}")
-                    else:
-                        console.print(f"[yellow]ai.bot responded with status {response.status_code}[/yellow]")
-                except Exception as e:
-                    console.print(f"[red]ai.bot is offline: {e}[/red]")
-                    console.print("[dim]Make sure ai.bot is running on localhost:8080[/dim]")
-
-            # Project management commands (Claude Code-like)
-            elif user_input.lower() == 'project-status':
-                # Analyze project structure
-                console.print("[cyan]Analyzing project structure...[/cyan]")
-                try:
-                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
-                    analysis = developer.analyze_project_structure()
-                    changes = developer.project_state.detect_changes()
-
-                    console.print("[green]Project Analysis:[/green]")
-                    console.print(f"Language: {analysis['language']}")
-                    console.print(f"Framework: {analysis['framework']}")
-                    console.print(f"Structure: {analysis['structure']}")
-                    console.print(f"Dependencies: {analysis['dependencies']}")
-                    console.print(f"Code Patterns: {analysis['patterns']}")
-
-                    if changes:
-                        console.print("\n[yellow]Recent Changes:[/yellow]")
-                        for file_path, change_type in changes.items():
-                            console.print(f"  {change_type}: {file_path}")
-                    else:
-                        console.print("\n[dim]No recent changes detected[/dim]")
-
-                except Exception as e:
-                    console.print(f"[red]Error analyzing project: {e}[/red]")
-
-            elif user_input.lower() == 'suggest-next':
-                # Suggest next development steps
-                console.print("[cyan]AI is analyzing project and suggesting next steps...[/cyan]")
-                try:
-                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
-                    suggestions = developer.suggest_next_steps()
-
-                    console.print("[green]Suggested Next Steps:[/green]")
-                    for i, suggestion in enumerate(suggestions, 1):
-                        console.print(f"  {i}. {suggestion}")
-
-                except Exception as e:
-                    console.print(f"[red]Error generating suggestions: {e}[/red]")
-
-            elif user_input.lower().startswith('continuous'):
-                # Continuous development mode
-                console.print("[cyan]Enabling continuous development mode...[/cyan]")
-                console.print("[yellow]Continuous mode is experimental. Type 'exit-continuous' to exit.[/yellow]")
-
-                try:
-                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
-                    context = developer.load_project_context()
-
-                    console.print("[green]Project context loaded:[/green]")
-                    console.print(f"Context: {len(context)} characters")
-
-                    # Add to session memory for continuous context
-                    persona.process_interaction(current_user, f"Continuous development mode started for project: {context[:500]}", ai_provider)
-                    console.print("[dim]Project context added to AI memory for continuous development.[/dim]")
-
-                except Exception as e:
-                    console.print(f"[red]Error starting continuous mode: {e}[/red]")
-
-            # Chat command or direct message
-            else:
-                # Remove 'chat' prefix if present
-                if user_input.lower().startswith('chat '):
-                    message = user_input[5:].strip()
-                else:
-                    message = user_input
-
-                if message:
-                    # Process interaction with AI
-                    response, relationship_delta = persona.process_interaction(
-                        current_user, message, ai_provider
-                    )
-
-                    # Display response
-                    console.print(f"\n[cyan]AI:[/cyan] {response}")
-
-                    # Show relationship change if significant
-                    if abs(relationship_delta) >= 0.1:
-                        if relationship_delta > 0:
-                            console.print(f"[green](+{relationship_delta:.2f} relationship)[/green]")
-                        else:
-                            console.print(f"[red]({relationship_delta:.2f} relationship)[/red]")
-
-        except KeyboardInterrupt:
-            console.print("\n[yellow]Use 'exit' or 'quit' to leave the shell[/yellow]")
-        except EOFError:
-            console.print("\n[cyan]Goodbye![/cyan]")
-            break
-        except Exception as e:
-            console.print(f"[red]Error: {e}[/red]")
-
-
-@app.command()
-def config(
-    action: str = typer.Argument(..., help="Action: get, set, delete, list"),
-    key: Optional[str] = typer.Argument(None, help="Configuration key (dot notation)"),
-    value: Optional[str] = typer.Argument(None, help="Value to set")
-):
-    """Manage configuration settings"""
-
-    if
action == "get": - if not key: - console.print("[red]Error: key required for get action[/red]") - return - - config_instance = Config() - val = config_instance.get(key) - if val is None: - console.print(f"[yellow]Key '{key}' not found[/yellow]") - else: - console.print(f"[cyan]{key}[/cyan] = [green]{val}[/green]") - - elif action == "set": - if not key or value is None: - console.print("[red]Error: key and value required for set action[/red]") - return - - config_instance = Config() - # Special handling for sensitive keys - if "password" in key or "api_key" in key: - console.print(f"[cyan]Setting {key}[/cyan] = [dim]***hidden***[/dim]") - else: - console.print(f"[cyan]Setting {key}[/cyan] = [green]{value}[/green]") - - config_instance.set(key, value) - console.print("[green]✓ Configuration saved[/green]") - - elif action == "delete": - if not key: - console.print("[red]Error: key required for delete action[/red]") - return - - config_instance = Config() - if config_instance.delete(key): - console.print(f"[green]✓ Deleted {key}[/green]") - else: - console.print(f"[yellow]Key '{key}' not found[/yellow]") - - elif action == "list": - config_instance = Config() - keys = config_instance.list_keys(key or "") - - if not keys: - console.print("[yellow]No configuration keys found[/yellow]") - return - - table = Table(title="Configuration Settings") - table.add_column("Key", style="cyan") - table.add_column("Value", style="green") - - for k in sorted(keys): - val = config_instance.get(k) - # Hide sensitive values - if "password" in k or "api_key" in k: - display_val = "***hidden***" if val else "not set" - else: - display_val = str(val) if val is not None else "not set" - - table.add_row(k, display_val) - - console.print(table) - - else: - console.print(f"[red]Unknown action: {action}[/red]") - console.print("Valid actions: get, set, delete, list") - - -@app.command() -def import_chatgpt( - file_path: Path = typer.Argument(..., help="Path to ChatGPT export JSON file"), - user_id: str = typer.Option("chatgpt_user", "--user-id", "-u", help="User ID for imported conversations"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory") -): - """Import ChatGPT conversation data into ai.gpt memory system""" - from .chatgpt_importer import ChatGPTImporter - - if data_dir is None: - data_dir = DEFAULT_DATA_DIR - - data_dir.mkdir(parents=True, exist_ok=True) - - if not file_path.exists(): - console.print(f"[red]Error: File not found: {file_path}[/red]") - raise typer.Exit(1) - - console.print(f"[cyan]Importing ChatGPT data from {file_path}[/cyan]") - console.print(f"User ID: {user_id}") - console.print(f"Data directory: {data_dir}") - - try: - importer = ChatGPTImporter(data_dir) - stats = importer.import_from_file(file_path, user_id) - - # Display results - table = Table(title="Import Results") - table.add_column("Metric", style="cyan") - table.add_column("Count", style="green") - - table.add_row("Conversations imported", str(stats["conversations_imported"])) - table.add_row("Total messages", str(stats["messages_imported"])) - table.add_row("User messages", str(stats["user_messages"])) - table.add_row("Assistant messages", str(stats["assistant_messages"])) - table.add_row("Skipped messages", str(stats["skipped_messages"])) - - console.print(table) - console.print(f"[green]✓ Import completed successfully![/green]") - - # Show next steps - console.print("\n[cyan]Next steps:[/cyan]") - console.print(f"- Check memories: [yellow]aigpt status[/yellow]") - console.print(f"- Chat 
with AI: [yellow]aigpt chat {user_id} \"hello\"[/yellow]") - console.print(f"- View relationships: [yellow]aigpt relationships[/yellow]") - - except Exception as e: - console.print(f"[red]Error during import: {e}[/red]") - raise typer.Exit(1) - - -@app.command() -def conversation( - user_id: str = typer.Argument(..., help="User ID (atproto DID)"), - data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"), - model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"), - provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)") -): - """Simple continuous conversation mode with MCP support""" - # Initialize MCP client - mcp_client = MCPClient() - persona = get_persona(data_dir) - - # Get defaults from config if not provided - config_instance = Config() - if not provider: - provider = config_instance.get("default_provider", "ollama") - if not model: - if provider == "ollama": - model = config_instance.get("providers.ollama.default_model", "qwen3:latest") - elif provider == "openai": - model = config_instance.get("providers.openai.default_model", "gpt-4o-mini") - else: - model = "qwen3:latest" # fallback - - # Create AI provider with MCP client - ai_provider = None - try: - ai_provider = create_ai_provider(provider=provider, model=model, mcp_client=mcp_client) - console.print(f"[dim]Using {provider} with model {model}[/dim]") - except Exception as e: - console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]") - - # MCP status - server_info = mcp_client.get_server_info() - if server_info["available"]: - console.print(f"[green]✓ MCP Server connected: {server_info['display_name']}[/green]") - console.print(f"[dim] URL: {server_info['base_url']} | Endpoints: {server_info['endpoints']}[/dim]") - else: - console.print(f"[yellow]⚠ MCP Server unavailable (running in local mode)[/yellow]") - - # Welcome message - console.print(f"[cyan]Conversation with AI started. 
Type 'exit' or 'quit' to end.[/cyan]") - if server_info["available"]: - console.print(f"[dim]MCP commands: /memories, /search, /context, /relationship[/dim]\n") - else: - console.print() - - # History for conversation mode - actual_data_dir = data_dir if data_dir else DEFAULT_DATA_DIR - history_file = actual_data_dir / "conversation_history.txt" - history = FileHistory(str(history_file)) - - # Custom completer for slash commands and phrases with MCP support - class ConversationCompleter(Completer): - def __init__(self, mcp_available: bool = False): - self.basic_commands = ['/status', '/help', '/clear', '/exit', '/quit'] - self.mcp_commands = ['/memories', '/search', '/context', '/relationship'] if mcp_available else [] - self.phrases = ['こんにちは', '今日は', 'ありがとう', 'お疲れ様', - 'どう思う?', 'どうですか?', '教えて', 'わかりました'] - self.all_commands = self.basic_commands + self.mcp_commands - - def get_completions(self, document, complete_event): - text = document.text_before_cursor - - # If text starts with '/', complete slash commands - if text.startswith('/'): - for cmd in self.all_commands: - if cmd.startswith(text): - yield Completion(cmd, start_position=-len(text)) - # For other text, complete phrases - else: - for phrase in self.phrases: - if phrase.startswith(text): - yield Completion(phrase, start_position=-len(text)) - - completer = ConversationCompleter(mcp_client.available) - - while True: - try: - # Simple prompt with completion - user_input = ptk_prompt( - f"{user_id}> ", - history=history, - auto_suggest=AutoSuggestFromHistory(), - completer=completer - ).strip() - - if not user_input: - continue - - # Exit commands - if user_input.lower() in ['exit', 'quit', 'bye', '/exit', '/quit']: - console.print("[cyan]Conversation ended.[/cyan]") - break - - # Slash commands - elif user_input.lower() == '/status': - state = persona.get_current_state() - rel = persona.relationships.get_or_create_relationship(user_id) - console.print(f"\n[cyan]AI Status:[/cyan]") - console.print(f"Mood: {state.current_mood}") - console.print(f"Fortune: {state.fortune.fortune_value}/10") - console.print(f"Relationship: {rel.status.value} ({rel.score:.2f})") - console.print("") - continue - - elif user_input.lower() in ['/help', '/']: - console.print(f"\n[cyan]Conversation Commands:[/cyan]") - console.print(" /status - Show AI status and relationship") - console.print(" /help - Show this help") - console.print(" /clear - Clear screen") - console.print(" /exit - End conversation") - console.print(" / - Show commands (same as /help)") - if mcp_client.available: - console.print(f"\n[cyan]MCP Commands:[/cyan]") - console.print(" /memories - Show recent memories") - console.print(" /search - Search memories") - console.print(" /context - Get contextual memories") - console.print(" /relationship - Show relationship via MCP") - - if mcp_client.has_card_tools: - console.print(f"\n[cyan]Card Commands:[/cyan]") - console.print(" AI can answer questions about cards:") - console.print(" - 'Show my cards'") - console.print(" - 'Draw a card' / 'Gacha'") - console.print(" - 'Analyze my collection'") - console.print(" - 'Show gacha stats'") - console.print("\n - Chat with AI\n") - continue - - elif user_input.lower() == '/clear': - console.clear() - continue - - # MCP Commands - elif user_input.lower() == '/memories' and mcp_client.available: - memories = asyncio.run(mcp_client.get_memories(limit=5)) - if memories: - console.print(f"\n[cyan]Recent Memories (via MCP):[/cyan]") - for i, mem in enumerate(memories[:5], 1): - console.print(f" {i}. 
[{mem.get('level', 'unknown')}] {mem.get('content', '')[:100]}...")
-                    console.print("")
-                else:
-                    console.print("[yellow]No memories found[/yellow]")
-                continue
-
-            elif user_input.lower().startswith('/search ') and mcp_client.available:
-                query = user_input[8:].strip()
-                if query:
-                    keywords = query.split()
-                    results = asyncio.run(mcp_client.search_memories(keywords))
-                    if results:
-                        console.print(f"\n[cyan]Memory Search Results for '{query}' (via MCP):[/cyan]")
-                        for i, mem in enumerate(results[:5], 1):
-                            console.print(f"  {i}. {mem.get('content', '')[:100]}...")
-                        console.print("")
-                    else:
-                        console.print(f"[yellow]No memories found for '{query}'[/yellow]")
-                else:
-                    console.print("[red]Usage: /search <keywords>[/red]")
-                continue
-
-            elif user_input.lower().startswith('/context ') and mcp_client.available:
-                query = user_input[9:].strip()
-                if query:
-                    results = asyncio.run(mcp_client.get_contextual_memories(query, limit=5))
-                    if results:
-                        console.print(f"\n[cyan]Contextual Memories for '{query}' (via MCP):[/cyan]")
-                        for i, mem in enumerate(results[:5], 1):
-                            console.print(f"  {i}. {mem.get('content', '')[:100]}...")
-                        console.print("")
-                    else:
-                        console.print(f"[yellow]No contextual memories found for '{query}'[/yellow]")
-                else:
-                    console.print("[red]Usage: /context <query>[/red]")
-                continue
-
-            elif user_input.lower() == '/relationship' and mcp_client.available:
-                rel_data = asyncio.run(mcp_client.get_relationship(user_id))
-                if rel_data:
-                    console.print("\n[cyan]Relationship (via MCP):[/cyan]")
-                    console.print(f"Status: {rel_data.get('status', 'unknown')}")
-                    console.print(f"Score: {rel_data.get('score', 0):.2f}")
-                    console.print(f"Interactions: {rel_data.get('total_interactions', 0)}")
-                    console.print("")
-                else:
-                    console.print("[yellow]No relationship data found[/yellow]")
-                continue
-
-            # Process interaction - try MCP first, fallback to local
-            if mcp_client.available:
-                try:
-                    mcp_result = asyncio.run(mcp_client.process_interaction(user_id, user_input))
-                    if mcp_result and 'response' in mcp_result:
-                        response = mcp_result['response']
-                        console.print(f"AI> {response} [dim](via MCP)[/dim]\n")
-                        continue
-                except Exception as e:
-                    console.print(f"[yellow]MCP failed, using local: {e}[/yellow]")
-
-            # Fallback to local processing
-            response, relationship_delta = persona.process_interaction(user_id, user_input, ai_provider)
-
-            # Simple AI response display (no Panel, no extra info)
-            console.print(f"AI> {response}\n")
-
-        except KeyboardInterrupt:
-            console.print("\n[yellow]Use 'exit' or 'quit' to end conversation[/yellow]")
-        except EOFError:
-            console.print("\n[cyan]Conversation ended.[/cyan]")
-            break
-        except Exception as e:
-            console.print(f"[red]Error: {e}[/red]")
-
-
-# Alias for conversation command
-@app.command()
-def conv(
-    user_id: str = typer.Argument(..., help="User ID (atproto DID)"),
-    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
-    model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"),
-    provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)")
-):
-    """Alias for conversation command"""
-    conversation(user_id, data_dir, model, provider)
-
-
-# Add documentation subcommand
-app.add_typer(docs_app, name="docs", help="Documentation management")
-
-# Add submodules subcommand
-app.add_typer(submodules_app, name="submodules", help="Submodule management")
-
-# Add tokens subcommand
-app.add_typer(tokens_app, name="tokens", help="Claude Code token usage and cost analysis")
-
-
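The conversation loop deleted above always tries the MCP server first and falls back to local processing when the call fails or returns nothing. A minimal, self-contained sketch of that pattern, assuming only an object with an `available` flag and an async `process_interaction` coroutine as in the deleted `MCPClient`; the `LocalPersona` stub and `respond` helper here are hypothetical illustrations, not part of the removed module:

```python
import asyncio
from typing import Any, Optional, Tuple


class LocalPersona:
    """Hypothetical stand-in for the Persona fallback used by `conversation`."""

    def process_interaction(self, user_id: str, text: str,
                            ai_provider: Optional[Any]) -> Tuple[str, float]:
        # Returns (response, relationship_delta), mirroring persona.process_interaction.
        return f"(local) echo: {text}", 0.0


async def respond(mcp_client: Optional[Any], persona: LocalPersona,
                  user_id: str, text: str) -> str:
    """MCP-first: ask the server when it is reachable, otherwise process locally."""
    if mcp_client is not None and getattr(mcp_client, "available", False):
        try:
            result = await mcp_client.process_interaction(user_id, text)
            if result and "response" in result:
                return result["response"]
        except Exception:
            pass  # server unreachable or request failed: fall back, as the CLI does
    response, _delta = persona.process_interaction(user_id, text, None)
    return response


if __name__ == "__main__":
    # With no MCP client, the local fallback answers.
    print(asyncio.run(respond(None, LocalPersona(), "did:example:alice", "hello")))
```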
-if __name__ == "__main__": - app() \ No newline at end of file diff --git a/python_backup/src/aigpt/commands/docs.py b/python_backup/src/aigpt/commands/docs.py deleted file mode 100644 index 9ae640a..0000000 --- a/python_backup/src/aigpt/commands/docs.py +++ /dev/null @@ -1,729 +0,0 @@ -"""Documentation management commands for ai.gpt.""" - -from pathlib import Path -from typing import Dict, List, Optional - -import typer -from rich.console import Console -from rich.panel import Panel -from rich.progress import track -from rich.table import Table - -from ..docs.config import get_ai_root, load_docs_config -from ..docs.templates import DocumentationTemplateManager -from ..docs.git_utils import ensure_submodules_available -from ..docs.wiki_generator import WikiGenerator -from ..docs.utils import ( - ProgressManager, - count_lines, - find_project_directories, - format_file_size, - safe_write_file, - validate_project_name, -) - -console = Console() -docs_app = typer.Typer(help="Documentation management for AI ecosystem") - - -@docs_app.command("generate") -def generate_docs( - project: str = typer.Option(..., "--project", "-p", help="Project name (os, gpt, card, etc.)"), - output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"), - include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"), - dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"), - auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"), - ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"), - dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be generated without writing files"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"), -) -> None: - """Generate project documentation with Claude AI integration. - - Creates comprehensive documentation by combining core philosophy, - architecture, and project-specific content. Supports ai.gpt - integration for enhanced documentation generation. 
- - Examples: - - # Generate basic documentation - aigpt docs generate --project=os - - # Generate with custom directory - aigpt docs generate --project=gpt --dir ~/ai/ai - - # Generate without auto-pulling missing submodules - aigpt docs generate --project=card --no-auto-pull - - # Generate with ai.gpt integration - aigpt docs generate --project=card --ai-gpt-integration - - # Preview without writing - aigpt docs generate --project=verse --dry-run - """ - try: - # Load configuration - with ProgressManager("Loading configuration...") as progress: - config = load_docs_config(dir) - ai_root = get_ai_root(dir) - - # Ensure submodules are available - if auto_pull: - with ProgressManager("Checking submodules...") as progress: - success, errors = ensure_submodules_available(ai_root, config, auto_clone=True) - if not success: - console.print(f"[red]Submodule errors: {errors}[/red]") - if not typer.confirm("Continue anyway?"): - raise typer.Abort() - - # Validate project - available_projects = config.list_projects() - if not validate_project_name(project, available_projects): - console.print(f"[red]Error: Project '{project}' not found[/red]") - console.print(f"Available projects: {', '.join(available_projects)}") - raise typer.Abort() - - # Parse components - components = [c.strip() for c in include.split(",")] - - # Initialize template manager - template_manager = DocumentationTemplateManager(config) - - # Validate components - valid_components = template_manager.validate_components(components) - if valid_components != components: - console.print("[yellow]Some components were invalid and filtered out[/yellow]") - - # Show generation info - project_info = config.get_project_info(project) - - info_table = Table(title=f"Documentation Generation: {project}") - info_table.add_column("Property", style="cyan") - info_table.add_column("Value", style="green") - - info_table.add_row("Project Type", project_info.type if project_info else "Unknown") - info_table.add_row("Status", project_info.status if project_info else "Unknown") - info_table.add_row("Output Path", str(output)) - info_table.add_row("Components", ", ".join(valid_components)) - info_table.add_row("AI.GPT Integration", "✓" if ai_gpt_integration else "✗") - info_table.add_row("Mode", "Dry Run" if dry_run else "Generate") - - console.print(info_table) - console.print() - - # AI.GPT integration - if ai_gpt_integration: - console.print("[blue]🤖 AI.GPT Integration enabled[/blue]") - try: - enhanced_content = _integrate_with_ai_gpt(project, valid_components, verbose) - if enhanced_content: - console.print("[green]✓ AI.GPT enhancement applied[/green]") - else: - console.print("[yellow]⚠ AI.GPT enhancement failed, using standard generation[/yellow]") - except Exception as e: - console.print(f"[yellow]⚠ AI.GPT integration error: {e}[/yellow]") - console.print("[dim]Falling back to standard generation[/dim]") - - # Generate documentation - with ProgressManager("Generating documentation...") as progress: - content = template_manager.generate_documentation( - project_name=project, - components=valid_components, - output_path=None if dry_run else output, - ) - - # Show results - if dry_run: - console.print(Panel( - f"[dim]Preview of generated content ({len(content.splitlines())} lines)[/dim]\n\n" + - content[:500] + "\n\n[dim]... (truncated)[/dim]", - title="Dry Run Preview", - expand=False, - )) - console.print(f"[yellow]🔍 Dry run completed. 
Would write to: {output}[/yellow]") - else: - # Write content if not dry run - if safe_write_file(output, content): - file_size = output.stat().st_size - line_count = count_lines(output) - - console.print(f"[green]✅ Generated: {output}[/green]") - console.print(f"[dim]📏 Size: {format_file_size(file_size)} ({line_count} lines)[/dim]") - - # Show component breakdown - if verbose: - console.print("\n[blue]📋 Component breakdown:[/blue]") - for component in valid_components: - component_display = component.replace("_", " ").title() - console.print(f" • {component_display}") - else: - console.print("[red]❌ Failed to write documentation[/red]") - raise typer.Abort() - - except Exception as e: - if verbose: - console.print_exception() - else: - console.print(f"[red]Error: {e}[/red]") - raise typer.Abort() - - -@docs_app.command("sync") -def sync_docs( - project: Optional[str] = typer.Option(None, "--project", "-p", help="Sync specific project"), - sync_all: bool = typer.Option(False, "--all", "-a", help="Sync all available projects"), - dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"), - include: str = typer.Option("core,specific", "--include", "-i", help="Components to include in sync"), - dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"), - auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"), - ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"), -) -> None: - """Sync documentation across multiple projects. - - Synchronizes Claude documentation from the central claude/ directory - to individual project directories. Supports both single-project and - bulk synchronization operations. 
- - Examples: - - # Sync specific project - aigpt docs sync --project=os - - # Sync all projects with custom directory - aigpt docs sync --all --dir ~/ai/ai - - # Preview sync operations - aigpt docs sync --all --dry-run - - # Sync without auto-pulling submodules - aigpt docs sync --project=gpt --no-auto-pull - """ - # Validate arguments - if not project and not sync_all: - console.print("[red]Error: Either --project or --all is required[/red]") - raise typer.Abort() - - if project and sync_all: - console.print("[red]Error: Cannot use both --project and --all[/red]") - raise typer.Abort() - - try: - # Load configuration - with ProgressManager("Loading configuration...") as progress: - config = load_docs_config(dir) - ai_root = get_ai_root(dir) - - # Ensure submodules are available - if auto_pull: - with ProgressManager("Checking submodules...") as progress: - success, errors = ensure_submodules_available(ai_root, config, auto_clone=True) - if not success: - console.print(f"[red]Submodule errors: {errors}[/red]") - if not typer.confirm("Continue anyway?"): - raise typer.Abort() - - available_projects = config.list_projects() - - # Validate specific project if provided - if project and not validate_project_name(project, available_projects): - console.print(f"[red]Error: Project '{project}' not found[/red]") - console.print(f"Available projects: {', '.join(available_projects)}") - raise typer.Abort() - - # Determine projects to sync - if sync_all: - target_projects = available_projects - else: - target_projects = [project] - - # Find project directories - project_dirs = find_project_directories(ai_root, target_projects) - - # Show sync information - sync_table = Table(title="Documentation Sync Plan") - sync_table.add_column("Project", style="cyan") - sync_table.add_column("Directory", style="blue") - sync_table.add_column("Status", style="green") - sync_table.add_column("Components", style="yellow") - - for proj in target_projects: - if proj in project_dirs: - target_file = project_dirs[proj] / "claude.md" - status = "✓ Found" if target_file.parent.exists() else "⚠ Missing" - sync_table.add_row(proj, str(project_dirs[proj]), status, include) - else: - sync_table.add_row(proj, "Not found", "❌ Missing", "N/A") - - console.print(sync_table) - console.print() - - if dry_run: - console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]") - - # AI.GPT integration setup - if ai_gpt_integration: - console.print("[blue]🤖 AI.GPT Integration enabled[/blue]") - console.print("[dim]Enhanced documentation generation will be applied[/dim]") - console.print() - - # Perform sync operations - sync_results = [] - - for proj in track(target_projects, description="Syncing projects..."): - result = _sync_project( - proj, - project_dirs.get(proj), - include, - dry_run, - ai_gpt_integration, - verbose - ) - sync_results.append((proj, result)) - - # Show results summary - _show_sync_summary(sync_results, dry_run) - - except Exception as e: - if verbose: - console.print_exception() - else: - console.print(f"[red]Error: {e}[/red]") - raise typer.Abort() - - -def _sync_project( - project_name: str, - project_dir: Optional[Path], - include: str, - dry_run: bool, - ai_gpt_integration: bool, - verbose: bool, -) -> Dict: - """Sync a single project.""" - result = { - "project": project_name, - "success": False, - "message": "", - "output_file": None, - "lines": 0, - } - - if not project_dir: - result["message"] = "Directory not found" - return result - - if not project_dir.exists(): - result["message"] = 
f"Directory does not exist: {project_dir}" - return result - - target_file = project_dir / "claude.md" - - if dry_run: - result["success"] = True - result["message"] = f"Would sync to {target_file}" - result["output_file"] = target_file - return result - - try: - # Use the generate functionality - config = load_docs_config() - template_manager = DocumentationTemplateManager(config) - - # Generate documentation - content = template_manager.generate_documentation( - project_name=project_name, - components=[c.strip() for c in include.split(",")], - output_path=target_file, - ) - - result["success"] = True - result["message"] = "Successfully synced" - result["output_file"] = target_file - result["lines"] = len(content.splitlines()) - - if verbose: - console.print(f"[dim]✓ Synced {project_name} → {target_file}[/dim]") - - except Exception as e: - result["message"] = f"Sync failed: {str(e)}" - if verbose: - console.print(f"[red]✗ Failed {project_name}: {e}[/red]") - - return result - - -def _show_sync_summary(sync_results: List[tuple], dry_run: bool) -> None: - """Show sync operation summary.""" - success_count = sum(1 for _, result in sync_results if result["success"]) - total_count = len(sync_results) - error_count = total_count - success_count - - # Summary table - summary_table = Table(title="Sync Summary") - summary_table.add_column("Metric", style="cyan") - summary_table.add_column("Value", style="green") - - summary_table.add_row("Total Projects", str(total_count)) - summary_table.add_row("Successful", str(success_count)) - summary_table.add_row("Failed", str(error_count)) - - if not dry_run: - total_lines = sum(result["lines"] for _, result in sync_results if result["success"]) - summary_table.add_row("Total Lines Generated", str(total_lines)) - - console.print() - console.print(summary_table) - - # Show errors if any - if error_count > 0: - console.print() - console.print("[red]❌ Failed Projects:[/red]") - for project_name, result in sync_results: - if not result["success"]: - console.print(f" • {project_name}: {result['message']}") - - # Final status - console.print() - if dry_run: - console.print("[yellow]🔍 This was a dry run. To apply changes, run without --dry-run[/yellow]") - elif error_count == 0: - console.print("[green]🎉 All projects synced successfully![/green]") - else: - console.print(f"[yellow]⚠ Completed with {error_count} error(s)[/yellow]") - - -def _integrate_with_ai_gpt(project: str, components: List[str], verbose: bool) -> Optional[str]: - """Integrate with ai.gpt for enhanced documentation generation.""" - try: - from ..ai_provider import create_ai_provider - from ..persona import Persona - from ..config import Config - - config = Config() - ai_root = config.data_dir.parent if config.data_dir else Path.cwd() - - # Create AI provider - provider = config.get("default_provider", "ollama") - model = config.get(f"providers.{provider}.default_model", "qwen2.5") - - ai_provider = create_ai_provider(provider=provider, model=model) - persona = Persona(config.data_dir) - - # Create enhancement prompt - enhancement_prompt = f"""As an AI documentation expert, enhance the documentation for project '{project}'. - -Project type: {project} -Components to include: {', '.join(components)} - -Please provide: -1. Improved project description -2. Key features that should be highlighted -3. Usage examples -4. Integration points with other AI ecosystem projects -5. 
Development workflow recommendations - -Focus on making the documentation more comprehensive and user-friendly.""" - - if verbose: - console.print("[dim]Generating AI-enhanced content...[/dim]") - - # Get AI response - response, _ = persona.process_interaction( - "docs_system", - enhancement_prompt, - ai_provider - ) - - if verbose: - console.print("[green]✓ AI enhancement generated[/green]") - - return response - - except ImportError as e: - if verbose: - console.print(f"[yellow]AI integration unavailable: {e}[/yellow]") - return None - except Exception as e: - if verbose: - console.print(f"[red]AI integration error: {e}[/red]") - return None - - -# Add aliases for convenience -@docs_app.command("gen") -def generate_docs_alias( - project: str = typer.Option(..., "--project", "-p", help="Project name"), - output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"), - include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"), - ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"), - dry_run: bool = typer.Option(False, "--dry-run", help="Preview mode"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"), -) -> None: - """Alias for generate command.""" - generate_docs(project, output, include, ai_gpt_integration, dry_run, verbose) - - -@docs_app.command("wiki") -def wiki_management( - action: str = typer.Option("update-auto", "--action", "-a", help="Action to perform (update-auto, build-home, status)"), - dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"), - auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Pull latest wiki changes before update"), - ai_enhance: bool = typer.Option(False, "--ai-enhance", help="Use AI to enhance wiki content"), - dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"), -) -> None: - """Manage AI wiki generation and updates. - - Automatically generates wiki pages from project claude.md files - and maintains the ai.wiki repository structure. 
- - Actions: - - update-auto: Generate auto/ directory with project summaries - - build-home: Rebuild Home.md from all projects - - status: Show wiki repository status - - Examples: - - # Update auto-generated content (with auto-pull) - aigpt docs wiki --action=update-auto - - # Update without pulling latest changes - aigpt docs wiki --action=update-auto --no-auto-pull - - # Update with custom directory - aigpt docs wiki --action=update-auto --dir ~/ai/ai - - # Preview what would be generated - aigpt docs wiki --action=update-auto --dry-run - - # Check wiki status - aigpt docs wiki --action=status - """ - try: - # Load configuration - with ProgressManager("Loading configuration...") as progress: - config = load_docs_config(dir) - ai_root = get_ai_root(dir) - - # Initialize wiki generator - wiki_generator = WikiGenerator(config, ai_root) - - if not wiki_generator.wiki_root: - console.print("[red]❌ ai.wiki directory not found[/red]") - console.print(f"Expected location: {ai_root / 'ai.wiki'}") - console.print("Please ensure ai.wiki submodule is cloned") - raise typer.Abort() - - # Show wiki information - if verbose: - console.print(f"[blue]📁 Wiki root: {wiki_generator.wiki_root}[/blue]") - console.print(f"[blue]📁 AI root: {ai_root}[/blue]") - - if action == "status": - _show_wiki_status(wiki_generator, ai_root) - - elif action == "update-auto": - if dry_run: - console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]") - if auto_pull: - console.print("[blue]📥 Would pull latest wiki changes[/blue]") - # Show what would be generated - project_dirs = find_project_directories(ai_root, config.list_projects()) - console.print(f"[blue]📋 Would generate {len(project_dirs)} project pages:[/blue]") - for project_name in project_dirs.keys(): - console.print(f" • auto/{project_name}.md") - console.print(" • Home.md") - else: - with ProgressManager("Updating wiki auto directory...") as progress: - success, updated_files = wiki_generator.update_wiki_auto_directory( - auto_pull=auto_pull, - ai_enhance=ai_enhance - ) - - if success: - console.print(f"[green]✅ Successfully updated {len(updated_files)} files[/green]") - if verbose: - for file in updated_files: - console.print(f" • {file}") - else: - console.print("[red]❌ Failed to update wiki[/red]") - raise typer.Abort() - - elif action == "build-home": - console.print("[blue]🏠 Building Home.md...[/blue]") - # This would be implemented to rebuild just Home.md - console.print("[yellow]⚠ build-home action not yet implemented[/yellow]") - - else: - console.print(f"[red]Unknown action: {action}[/red]") - console.print("Available actions: update-auto, build-home, status") - raise typer.Abort() - - except Exception as e: - if verbose: - console.print_exception() - else: - console.print(f"[red]Error: {e}[/red]") - raise typer.Abort() - - -def _show_wiki_status(wiki_generator: WikiGenerator, ai_root: Path) -> None: - """Show wiki repository status.""" - console.print("[blue]📊 AI Wiki Status[/blue]") - - # Check wiki directory structure - wiki_root = wiki_generator.wiki_root - status_table = Table(title="Wiki Directory Status") - status_table.add_column("Directory", style="cyan") - status_table.add_column("Status", style="green") - status_table.add_column("Files", style="yellow") - - directories = ["auto", "claude", "manual"] - for dir_name in directories: - dir_path = wiki_root / dir_name - if dir_path.exists(): - file_count = len(list(dir_path.glob("*.md"))) - status = "✓ Exists" - files = f"{file_count} files" - else: - status = "❌ Missing" - files 
= "N/A" - - status_table.add_row(dir_name, status, files) - - # Check Home.md - home_path = wiki_root / "Home.md" - home_status = "✓ Exists" if home_path.exists() else "❌ Missing" - status_table.add_row("Home.md", home_status, "1 file" if home_path.exists() else "N/A") - - console.print(status_table) - - # Show project coverage - config = wiki_generator.config - project_dirs = find_project_directories(ai_root, config.list_projects()) - auto_dir = wiki_root / "auto" - - if auto_dir.exists(): - existing_wiki_files = set(f.stem for f in auto_dir.glob("*.md")) - available_projects = set(project_dirs.keys()) - - missing = available_projects - existing_wiki_files - orphaned = existing_wiki_files - available_projects - - console.print(f"\n[blue]📋 Project Coverage:[/blue]") - console.print(f" • Total projects: {len(available_projects)}") - console.print(f" • Wiki pages: {len(existing_wiki_files)}") - - if missing: - console.print(f" • Missing wiki pages: {', '.join(missing)}") - if orphaned: - console.print(f" • Orphaned wiki pages: {', '.join(orphaned)}") - - if not missing and not orphaned: - console.print(f" • ✅ All projects have wiki pages") - - -@docs_app.command("config") -def docs_config( - action: str = typer.Option("show", "--action", "-a", help="Action (show, set-dir, clear-dir)"), - value: Optional[str] = typer.Option(None, "--value", "-v", help="Value to set"), - verbose: bool = typer.Option(False, "--verbose", help="Enable verbose output"), -) -> None: - """Manage documentation configuration. - - Configure default settings for aigpt docs commands to avoid - repeating options like --dir every time. - - Actions: - - show: Display current configuration - - set-dir: Set default AI root directory - - clear-dir: Clear default AI root directory - - Examples: - - # Show current config - aigpt docs config --action=show - - # Set default directory - aigpt docs config --action=set-dir --value=~/ai/ai - - # Clear default directory - aigpt docs config --action=clear-dir - """ - try: - from ..config import Config - config = Config() - - if action == "show": - console.print("[blue]📁 AI Documentation Configuration[/blue]") - - # Show current ai_root resolution - current_ai_root = get_ai_root() - console.print(f"[green]Current AI root: {current_ai_root}[/green]") - - # Show resolution method - import os - env_dir = os.getenv("AI_DOCS_DIR") - config_dir = config.get("docs.ai_root") - - resolution_table = Table(title="Directory Resolution") - resolution_table.add_column("Method", style="cyan") - resolution_table.add_column("Value", style="yellow") - resolution_table.add_column("Status", style="green") - - resolution_table.add_row("Environment (AI_DOCS_DIR)", env_dir or "Not set", "✓ Active" if env_dir else "Not used") - resolution_table.add_row("Config file (docs.ai_root)", config_dir or "Not set", "✓ Active" if config_dir and not env_dir else "Not used") - resolution_table.add_row("Default (relative)", str(Path(__file__).parent.parent.parent.parent.parent), "✓ Active" if not env_dir and not config_dir else "Not used") - - console.print(resolution_table) - - if verbose: - console.print(f"\n[dim]Config file: {config.config_file}[/dim]") - - elif action == "set-dir": - if not value: - console.print("[red]Error: --value is required for set-dir action[/red]") - raise typer.Abort() - - # Expand and validate path - ai_root_path = Path(value).expanduser().absolute() - - if not ai_root_path.exists(): - console.print(f"[yellow]Warning: Directory does not exist: {ai_root_path}[/yellow]") - if not 
typer.confirm("Set anyway?"): - raise typer.Abort() - - # Check if ai.json exists - ai_json_path = ai_root_path / "ai.json" - if not ai_json_path.exists(): - console.print(f"[yellow]Warning: ai.json not found at: {ai_json_path}[/yellow]") - if not typer.confirm("Set anyway?"): - raise typer.Abort() - - # Save to config - config.set("docs.ai_root", str(ai_root_path)) - - console.print(f"[green]✅ Set default AI root directory: {ai_root_path}[/green]") - console.print("[dim]This will be used when --dir is not specified and AI_DOCS_DIR is not set[/dim]") - - elif action == "clear-dir": - config.delete("docs.ai_root") - - console.print("[green]✅ Cleared default AI root directory[/green]") - console.print("[dim]Will use default relative path when --dir and AI_DOCS_DIR are not set[/dim]") - - else: - console.print(f"[red]Unknown action: {action}[/red]") - console.print("Available actions: show, set-dir, clear-dir") - raise typer.Abort() - - except Exception as e: - if verbose: - console.print_exception() - else: - console.print(f"[red]Error: {e}[/red]") - raise typer.Abort() - - -# Export the docs app -__all__ = ["docs_app"] \ No newline at end of file diff --git a/python_backup/src/aigpt/commands/submodules.py b/python_backup/src/aigpt/commands/submodules.py deleted file mode 100644 index 7e932f4..0000000 --- a/python_backup/src/aigpt/commands/submodules.py +++ /dev/null @@ -1,305 +0,0 @@ -"""Submodule management commands for ai.gpt.""" - -from pathlib import Path -from typing import Dict, List, Optional, Tuple -import subprocess -import json - -import typer -from rich.console import Console -from rich.panel import Panel -from rich.table import Table - -from ..docs.config import get_ai_root, load_docs_config -from ..docs.git_utils import ( - check_git_repository, - get_git_branch, - get_git_remote_url -) -from ..docs.utils import run_command - -console = Console() -submodules_app = typer.Typer(help="Submodule management for AI ecosystem") - - -def get_submodules_from_gitmodules(repo_path: Path) -> Dict[str, str]: - """Parse .gitmodules file to get submodule information.""" - gitmodules_path = repo_path / ".gitmodules" - if not gitmodules_path.exists(): - return {} - - submodules = {} - current_name = None - - with open(gitmodules_path, 'r') as f: - for line in f: - line = line.strip() - if line.startswith('[submodule "') and line.endswith('"]'): - current_name = line[12:-2] # Extract module name - elif line.startswith('path = ') and current_name: - path = line[7:] # Extract path - submodules[current_name] = path - current_name = None - - return submodules - - -def get_branch_for_module(config, module_name: str) -> str: - """Get target branch for a module from ai.json.""" - project_info = config.get_project_info(module_name) - if project_info and project_info.branch: - return project_info.branch - return "main" # Default branch - - -@submodules_app.command("list") -def list_submodules( - dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed information") -): - """List all submodules and their status.""" - try: - config = load_docs_config(dir) - ai_root = get_ai_root(dir) - - if not check_git_repository(ai_root): - console.print("[red]Error: Not a git repository[/red]") - raise typer.Abort() - - submodules = get_submodules_from_gitmodules(ai_root) - - if not submodules: - console.print("[yellow]No submodules found[/yellow]") - return - - table = Table(title="Submodules Status") - 
table.add_column("Module", style="cyan") - table.add_column("Path", style="blue") - table.add_column("Branch", style="green") - table.add_column("Status", style="yellow") - - for module_name, module_path in submodules.items(): - full_path = ai_root / module_path - - if not full_path.exists(): - status = "❌ Missing" - branch = "N/A" - else: - branch = get_git_branch(full_path) or "detached" - - # Check if submodule is up to date - returncode, stdout, stderr = run_command( - ["git", "submodule", "status", module_path], - cwd=ai_root - ) - - if returncode == 0 and stdout: - status_char = stdout[0] if stdout else ' ' - if status_char == ' ': - status = "✅ Clean" - elif status_char == '+': - status = "📝 Modified" - elif status_char == '-': - status = "❌ Not initialized" - elif status_char == 'U': - status = "⚠️ Conflicts" - else: - status = "❓ Unknown" - else: - status = "❓ Unknown" - - target_branch = get_branch_for_module(config, module_name) - branch_display = f"{branch}" - if branch != target_branch: - branch_display += f" (target: {target_branch})" - - table.add_row(module_name, module_path, branch_display, status) - - console.print(table) - - if verbose: - console.print(f"\n[dim]Total submodules: {len(submodules)}[/dim]") - console.print(f"[dim]Repository root: {ai_root}[/dim]") - - except Exception as e: - console.print(f"[red]Error: {e}[/red]") - raise typer.Abort() - - -@submodules_app.command("update") -def update_submodules( - module: Optional[str] = typer.Option(None, "--module", "-m", help="Update specific submodule"), - all: bool = typer.Option(False, "--all", "-a", help="Update all submodules"), - dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"), - dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done"), - auto_commit: bool = typer.Option(False, "--auto-commit", help="Auto-commit changes"), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output") -): - """Update submodules to latest commits.""" - if not module and not all: - console.print("[red]Error: Either --module or --all is required[/red]") - raise typer.Abort() - - if module and all: - console.print("[red]Error: Cannot use both --module and --all[/red]") - raise typer.Abort() - - try: - config = load_docs_config(dir) - ai_root = get_ai_root(dir) - - if not check_git_repository(ai_root): - console.print("[red]Error: Not a git repository[/red]") - raise typer.Abort() - - submodules = get_submodules_from_gitmodules(ai_root) - - if not submodules: - console.print("[yellow]No submodules found[/yellow]") - return - - # Determine which modules to update - if all: - modules_to_update = list(submodules.keys()) - else: - if module not in submodules: - console.print(f"[red]Error: Submodule '{module}' not found[/red]") - console.print(f"Available modules: {', '.join(submodules.keys())}") - raise typer.Abort() - modules_to_update = [module] - - if dry_run: - console.print("[yellow]🔍 DRY RUN MODE - No changes will be made[/yellow]") - - console.print(f"[cyan]Updating {len(modules_to_update)} submodule(s)...[/cyan]") - - updated_modules = [] - - for module_name in modules_to_update: - module_path = submodules[module_name] - full_path = ai_root / module_path - target_branch = get_branch_for_module(config, module_name) - - console.print(f"\n[blue]📦 Processing: {module_name}[/blue]") - - if not full_path.exists(): - console.print(f"[red]❌ Module directory not found: {module_path}[/red]") - continue - - # Get current commit - current_commit = None 
- returncode, stdout, stderr = run_command( - ["git", "rev-parse", "HEAD"], - cwd=full_path - ) - if returncode == 0: - current_commit = stdout.strip()[:8] - - if dry_run: - console.print(f"[yellow]🔍 Would update {module_name} to branch {target_branch}[/yellow]") - if current_commit: - console.print(f"[dim]Current: {current_commit}[/dim]") - continue - - # Fetch latest changes - console.print(f"[dim]Fetching latest changes...[/dim]") - returncode, stdout, stderr = run_command( - ["git", "fetch", "origin"], - cwd=full_path - ) - - if returncode != 0: - console.print(f"[red]❌ Failed to fetch: {stderr}[/red]") - continue - - # Check if update is needed - returncode, stdout, stderr = run_command( - ["git", "rev-parse", f"origin/{target_branch}"], - cwd=full_path - ) - - if returncode != 0: - console.print(f"[red]❌ Branch {target_branch} not found on remote[/red]") - continue - - latest_commit = stdout.strip()[:8] - - if current_commit == latest_commit: - console.print(f"[green]✅ Already up to date[/green]") - continue - - # Switch to target branch and pull - console.print(f"[dim]Switching to branch {target_branch}...[/dim]") - returncode, stdout, stderr = run_command( - ["git", "checkout", target_branch], - cwd=full_path - ) - - if returncode != 0: - console.print(f"[red]❌ Failed to checkout {target_branch}: {stderr}[/red]") - continue - - returncode, stdout, stderr = run_command( - ["git", "pull", "origin", target_branch], - cwd=full_path - ) - - if returncode != 0: - console.print(f"[red]❌ Failed to pull: {stderr}[/red]") - continue - - # Get new commit - returncode, stdout, stderr = run_command( - ["git", "rev-parse", "HEAD"], - cwd=full_path - ) - new_commit = stdout.strip()[:8] if returncode == 0 else "unknown" - - # Stage the submodule update - returncode, stdout, stderr = run_command( - ["git", "add", module_path], - cwd=ai_root - ) - - console.print(f"[green]✅ Updated {module_name} ({current_commit} → {new_commit})[/green]") - updated_modules.append((module_name, current_commit, new_commit)) - - # Summary - if updated_modules: - console.print(f"\n[green]🎉 Successfully updated {len(updated_modules)} module(s)[/green]") - - if verbose: - for module_name, old_commit, new_commit in updated_modules: - console.print(f" • {module_name}: {old_commit} → {new_commit}") - - if auto_commit and not dry_run: - console.print("[blue]💾 Auto-committing changes...[/blue]") - commit_message = f"Update submodules\n\n📦 Updated modules: {len(updated_modules)}\n" - for module_name, old_commit, new_commit in updated_modules: - commit_message += f"- {module_name}: {old_commit} → {new_commit}\n" - commit_message += "\n🤖 Generated with ai.gpt submodules update" - - returncode, stdout, stderr = run_command( - ["git", "commit", "-m", commit_message], - cwd=ai_root - ) - - if returncode == 0: - console.print("[green]✅ Changes committed successfully[/green]") - else: - console.print(f"[red]❌ Failed to commit: {stderr}[/red]") - elif not dry_run: - console.print("[yellow]💾 Changes staged but not committed[/yellow]") - console.print("Run with --auto-commit to commit automatically") - elif not dry_run: - console.print("[yellow]No modules needed updating[/yellow]") - - except Exception as e: - console.print(f"[red]Error: {e}[/red]") - if verbose: - console.print_exception() - raise typer.Abort() - - -# Export the submodules app -__all__ = ["submodules_app"] \ No newline at end of file diff --git a/python_backup/src/aigpt/commands/tokens.py b/python_backup/src/aigpt/commands/tokens.py deleted file mode 100644 index 
1b6178c..0000000 --- a/python_backup/src/aigpt/commands/tokens.py +++ /dev/null @@ -1,440 +0,0 @@ -"""Claude Code token usage and cost analysis commands.""" - -from pathlib import Path -from typing import Dict, List, Optional, Tuple -from datetime import datetime, timedelta -import json -import sqlite3 - -import typer -from rich.console import Console -from rich.panel import Panel -from rich.table import Table -from rich.progress import track - -console = Console() -tokens_app = typer.Typer(help="Claude Code token usage and cost analysis") - -# Claude Code pricing (estimated rates in USD) -CLAUDE_PRICING = { - "input_tokens_per_1k": 0.003, # $3 per 1M input tokens - "output_tokens_per_1k": 0.015, # $15 per 1M output tokens - "usd_to_jpy": 150 # Exchange rate -} - - -def find_claude_data_dir() -> Optional[Path]: - """Find Claude Code data directory.""" - possible_paths = [ - Path.home() / ".claude", - Path.home() / ".config" / "claude", - Path.cwd() / ".claude" - ] - - for path in possible_paths: - if path.exists() and (path / "projects").exists(): - return path - - return None - - -def parse_jsonl_files(claude_dir: Path) -> List[Dict]: - """Parse Claude Code JSONL files safely.""" - records = [] - projects_dir = claude_dir / "projects" - - if not projects_dir.exists(): - return records - - # Find all .jsonl files recursively - jsonl_files = list(projects_dir.rglob("*.jsonl")) - - for jsonl_file in track(jsonl_files, description="Reading Claude data..."): - try: - with open(jsonl_file, 'r', encoding='utf-8') as f: - for line_num, line in enumerate(f, 1): - line = line.strip() - if not line: - continue - - try: - record = json.loads(line) - # Only include records with usage information - if (record.get('type') == 'assistant' and - 'message' in record and - 'usage' in record.get('message', {})): - records.append(record) - except json.JSONDecodeError: - # Skip malformed JSON lines - continue - - except (IOError, PermissionError): - # Skip files we can't read - continue - - return records - - -def calculate_costs(records: List[Dict]) -> Dict[str, float]: - """Calculate token costs from usage records.""" - total_input_tokens = 0 - total_output_tokens = 0 - total_cost_usd = 0 - - for record in records: - try: - usage = record.get('message', {}).get('usage', {}) - - input_tokens = int(usage.get('input_tokens', 0)) - output_tokens = int(usage.get('output_tokens', 0)) - - # Calculate cost if not provided - cost_usd = record.get('costUSD') - if cost_usd is None: - input_cost = (input_tokens / 1000) * CLAUDE_PRICING["input_tokens_per_1k"] - output_cost = (output_tokens / 1000) * CLAUDE_PRICING["output_tokens_per_1k"] - cost_usd = input_cost + output_cost - else: - cost_usd = float(cost_usd) - - total_input_tokens += input_tokens - total_output_tokens += output_tokens - total_cost_usd += cost_usd - - except (ValueError, TypeError, KeyError): - # Skip records with invalid data - continue - - return { - 'input_tokens': total_input_tokens, - 'output_tokens': total_output_tokens, - 'total_tokens': total_input_tokens + total_output_tokens, - 'cost_usd': total_cost_usd, - 'cost_jpy': total_cost_usd * CLAUDE_PRICING["usd_to_jpy"] - } - - -def group_by_date(records: List[Dict]) -> Dict[str, Dict]: - """Group records by date and calculate daily costs.""" - daily_stats = {} - - for record in records: - try: - timestamp = record.get('timestamp') - if not timestamp: - continue - - # Parse timestamp and convert to JST - dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) - # Convert to JST (UTC+9) - 
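A side note on the timestamp handling in `group_by_date` below: the code converts UTC timestamps to JST with a fixed nine-hour offset, which is safe because JST observes no daylight saving. The equivalent using `zoneinfo`, shown only as an illustration (the sample timestamp is made up):

```python
# Illustration only; the sample timestamp is made up.
from datetime import datetime
from zoneinfo import ZoneInfo

ts = "2025-06-01T20:00:00Z"
dt_utc = datetime.fromisoformat(ts.replace("Z", "+00:00"))
dt_jst = dt_utc.astimezone(ZoneInfo("Asia/Tokyo"))

print(dt_jst.strftime("%Y-%m-%d"))  # 2025-06-02, the date rolls over past midnight JST
```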
jst_dt = dt + timedelta(hours=9) - date_key = jst_dt.strftime('%Y-%m-%d') - - if date_key not in daily_stats: - daily_stats[date_key] = [] - - daily_stats[date_key].append(record) - - except (ValueError, TypeError): - continue - - # Calculate costs for each day - daily_costs = {} - for date_key, day_records in daily_stats.items(): - daily_costs[date_key] = calculate_costs(day_records) - - return daily_costs - - -@tokens_app.command("summary") -def token_summary( - period: str = typer.Option("all", help="Period: today, week, month, all"), - claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"), - show_details: bool = typer.Option(False, "--details", help="Show detailed breakdown"), - format: str = typer.Option("table", help="Output format: table, json") -): - """Show Claude Code token usage summary and estimated costs.""" - - # Find Claude data directory - if claude_dir is None: - claude_dir = find_claude_data_dir() - - if claude_dir is None: - console.print("[red]❌ Claude Code data directory not found[/red]") - console.print("[dim]Looked in: ~/.claude, ~/.config/claude, ./.claude[/dim]") - raise typer.Abort() - - if not claude_dir.exists(): - console.print(f"[red]❌ Directory not found: {claude_dir}[/red]") - raise typer.Abort() - - console.print(f"[cyan]📊 Analyzing Claude Code usage from: {claude_dir}[/cyan]") - - # Parse data - records = parse_jsonl_files(claude_dir) - - if not records: - console.print("[yellow]⚠️ No usage data found[/yellow]") - return - - # Filter by period - now = datetime.now() - filtered_records = [] - - if period == "today": - today = now.strftime('%Y-%m-%d') - for record in records: - try: - timestamp = record.get('timestamp') - if timestamp: - dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) - jst_dt = dt + timedelta(hours=9) - if jst_dt.strftime('%Y-%m-%d') == today: - filtered_records.append(record) - except (ValueError, TypeError): - continue - - elif period == "week": - week_ago = now - timedelta(days=7) - for record in records: - try: - timestamp = record.get('timestamp') - if timestamp: - dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) - jst_dt = dt + timedelta(hours=9) - if jst_dt.date() >= week_ago.date(): - filtered_records.append(record) - except (ValueError, TypeError): - continue - - elif period == "month": - month_ago = now - timedelta(days=30) - for record in records: - try: - timestamp = record.get('timestamp') - if timestamp: - dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) - jst_dt = dt + timedelta(hours=9) - if jst_dt.date() >= month_ago.date(): - filtered_records.append(record) - except (ValueError, TypeError): - continue - - else: # all - filtered_records = records - - # Calculate total costs - total_stats = calculate_costs(filtered_records) - - if format == "json": - # JSON output - output = { - "period": period, - "total_records": len(filtered_records), - "input_tokens": total_stats['input_tokens'], - "output_tokens": total_stats['output_tokens'], - "total_tokens": total_stats['total_tokens'], - "estimated_cost_usd": round(total_stats['cost_usd'], 2), - "estimated_cost_jpy": round(total_stats['cost_jpy'], 0) - } - console.print(json.dumps(output, indent=2)) - return - - # Table output - console.print(Panel( - f"[bold cyan]Claude Code Token Usage Report[/bold cyan]\n\n" - f"Period: {period.title()}\n" - f"Data source: {claude_dir}", - title="📊 Usage Analysis", - border_style="cyan" - )) - - # Summary table - summary_table = Table(title="Token Summary") - 
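The costs reported in this summary follow the per-1k-token arithmetic of `calculate_costs` above. A worked example under the `CLAUDE_PRICING` constants defined earlier, with illustrative token counts:

```python
# Worked example of the calculate_costs arithmetic.
# Token counts are illustrative, not real usage data.
CLAUDE_PRICING = {
    "input_tokens_per_1k": 0.003,   # $3 per 1M input tokens
    "output_tokens_per_1k": 0.015,  # $15 per 1M output tokens
    "usd_to_jpy": 150,
}

input_tokens, output_tokens = 12_000, 3_000

cost_usd = (
    (input_tokens / 1000) * CLAUDE_PRICING["input_tokens_per_1k"]     # $0.036
    + (output_tokens / 1000) * CLAUDE_PRICING["output_tokens_per_1k"]  # $0.045
)
cost_jpy = cost_usd * CLAUDE_PRICING["usd_to_jpy"]

print(f"${cost_usd:.3f}")   # $0.081
print(f"¥{cost_jpy:,.0f}")  # ¥12
```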
summary_table.add_column("Metric", style="cyan") - summary_table.add_column("Value", style="green") - - summary_table.add_row("Input Tokens", f"{total_stats['input_tokens']:,}") - summary_table.add_row("Output Tokens", f"{total_stats['output_tokens']:,}") - summary_table.add_row("Total Tokens", f"{total_stats['total_tokens']:,}") - summary_table.add_row("", "") # Separator - summary_table.add_row("Estimated Cost (USD)", f"${total_stats['cost_usd']:.2f}") - summary_table.add_row("Estimated Cost (JPY)", f"¥{total_stats['cost_jpy']:,.0f}") - summary_table.add_row("Records Analyzed", str(len(filtered_records))) - - console.print(summary_table) - - # Show daily breakdown if requested - if show_details: - daily_costs = group_by_date(filtered_records) - - if daily_costs: - console.print("\n") - daily_table = Table(title="Daily Breakdown") - daily_table.add_column("Date", style="cyan") - daily_table.add_column("Input Tokens", style="blue") - daily_table.add_column("Output Tokens", style="green") - daily_table.add_column("Total Tokens", style="yellow") - daily_table.add_column("Cost (JPY)", style="red") - - for date in sorted(daily_costs.keys(), reverse=True): - stats = daily_costs[date] - daily_table.add_row( - date, - f"{stats['input_tokens']:,}", - f"{stats['output_tokens']:,}", - f"{stats['total_tokens']:,}", - f"¥{stats['cost_jpy']:,.0f}" - ) - - console.print(daily_table) - - # Warning about estimates - console.print("\n[dim]💡 Note: Costs are estimates based on Claude API pricing.[/dim]") - console.print("[dim] Actual Claude Code subscription costs may differ.[/dim]") - - -@tokens_app.command("daily") -def daily_breakdown( - days: int = typer.Option(7, help="Number of days to show"), - claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"), -): - """Show daily token usage breakdown.""" - - # Find Claude data directory - if claude_dir is None: - claude_dir = find_claude_data_dir() - - if claude_dir is None: - console.print("[red]❌ Claude Code data directory not found[/red]") - raise typer.Abort() - - console.print(f"[cyan]📅 Daily token usage (last {days} days)[/cyan]") - - # Parse data - records = parse_jsonl_files(claude_dir) - - if not records: - console.print("[yellow]⚠️ No usage data found[/yellow]") - return - - # Group by date - daily_costs = group_by_date(records) - - # Get recent days - recent_dates = sorted(daily_costs.keys(), reverse=True)[:days] - - if not recent_dates: - console.print("[yellow]No recent usage data found[/yellow]") - return - - # Create table - table = Table(title=f"Daily Usage (Last {len(recent_dates)} days)") - table.add_column("Date", style="cyan") - table.add_column("Input", style="blue") - table.add_column("Output", style="green") - table.add_column("Total", style="yellow") - table.add_column("Cost (JPY)", style="red") - - total_cost = 0 - for date in recent_dates: - stats = daily_costs[date] - total_cost += stats['cost_jpy'] - - table.add_row( - date, - f"{stats['input_tokens']:,}", - f"{stats['output_tokens']:,}", - f"{stats['total_tokens']:,}", - f"¥{stats['cost_jpy']:,.0f}" - ) - - # Add total row - table.add_row( - "──────────", - "────────", - "────────", - "────────", - "──────────" - ) - table.add_row( - "【Total】", - "", - "", - "", - f"¥{total_cost:,.0f}" - ) - - console.print(table) - console.print(f"\n[green]Total estimated cost for {len(recent_dates)} days: ¥{total_cost:,.0f}[/green]") - - -@tokens_app.command("status") -def token_status( - claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", 
help="Claude data directory"), -): - """Check Claude Code data availability and basic stats.""" - - # Find Claude data directory - if claude_dir is None: - claude_dir = find_claude_data_dir() - - console.print("[cyan]🔍 Claude Code Data Status[/cyan]") - - if claude_dir is None: - console.print("[red]❌ Claude Code data directory not found[/red]") - console.print("\n[yellow]Searched locations:[/yellow]") - console.print(" • ~/.claude") - console.print(" • ~/.config/claude") - console.print(" • ./.claude") - console.print("\n[dim]Make sure Claude Code is installed and has been used.[/dim]") - return - - console.print(f"[green]✅ Found data directory: {claude_dir}[/green]") - - projects_dir = claude_dir / "projects" - if not projects_dir.exists(): - console.print("[yellow]⚠️ No projects directory found[/yellow]") - return - - # Count files - jsonl_files = list(projects_dir.rglob("*.jsonl")) - console.print(f"[blue]📂 Found {len(jsonl_files)} JSONL files[/blue]") - - if jsonl_files: - # Parse sample to check data quality - sample_records = [] - for jsonl_file in jsonl_files[:3]: # Check first 3 files - try: - with open(jsonl_file, 'r') as f: - for line in f: - if line.strip(): - try: - record = json.loads(line.strip()) - sample_records.append(record) - if len(sample_records) >= 10: - break - except json.JSONDecodeError: - continue - if len(sample_records) >= 10: - break - except IOError: - continue - - usage_records = [r for r in sample_records - if r.get('type') == 'assistant' and - 'usage' in r.get('message', {})] - - console.print(f"[green]📊 Found {len(usage_records)} usage records in sample[/green]") - - if usage_records: - console.print("[blue]✅ Data appears valid for cost analysis[/blue]") - console.print("\n[dim]Run 'aigpt tokens summary' for full analysis[/dim]") - else: - console.print("[yellow]⚠️ No usage data found in sample[/yellow]") - else: - console.print("[yellow]⚠️ No JSONL files found[/yellow]") - - -# Export the tokens app -__all__ = ["tokens_app"] \ No newline at end of file diff --git a/python_backup/src/aigpt/config.py b/python_backup/src/aigpt/config.py deleted file mode 100644 index f3ef550..0000000 --- a/python_backup/src/aigpt/config.py +++ /dev/null @@ -1,184 +0,0 @@ -"""Configuration management for ai.gpt""" - -import json -import os -from pathlib import Path -from typing import Optional, Dict, Any -import logging - - -class Config: - """Manages configuration settings""" - - def __init__(self, config_dir: Optional[Path] = None): - if config_dir is None: - config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" - - self.config_dir = config_dir - self.config_file = config_dir / "config.json" - self.data_dir = config_dir / "data" - - # Create directories if they don't exist - self.config_dir.mkdir(parents=True, exist_ok=True) - self.data_dir.mkdir(parents=True, exist_ok=True) - - self.logger = logging.getLogger(__name__) - self._config: Dict[str, Any] = {} - self._load_config() - - def _load_config(self): - """Load configuration from file""" - if self.config_file.exists(): - try: - with open(self.config_file, 'r', encoding='utf-8') as f: - self._config = json.load(f) - except Exception as e: - self.logger.error(f"Failed to load config: {e}") - self._config = {} - else: - # Initialize with default config - self._config = { - "providers": { - "openai": { - "api_key": None, - "default_model": "gpt-4o-mini", - "system_prompt": None - }, - "ollama": { - "host": "http://localhost:11434", - "default_model": "qwen3:latest", - "system_prompt": None - } - }, - "mcp": { - 
"enabled": True, - "auto_detect": True, - "servers": { - "ai_gpt": { - "name": "ai.gpt MCP Server", - "base_url": "http://localhost:8001", - "endpoints": { - "get_memories": "/get_memories", - "search_memories": "/search_memories", - "get_contextual_memories": "/get_contextual_memories", - "process_interaction": "/process_interaction", - "get_relationship": "/get_relationship", - "get_all_relationships": "/get_all_relationships", - "get_persona_state": "/get_persona_state", - "get_fortune": "/get_fortune", - "run_maintenance": "/run_maintenance", - "execute_command": "/execute_command", - "analyze_file": "/analyze_file", - "remote_shell": "/remote_shell", - "ai_bot_status": "/ai_bot_status" - }, - "timeout": 10.0 - }, - "ai_card": { - "name": "ai.card MCP Server", - "base_url": "http://localhost:8000", - "endpoints": { - "health": "/health", - "get_user_cards": "/api/cards/user", - "gacha": "/api/gacha", - "sync_atproto": "/api/sync" - }, - "timeout": 5.0 - } - } - }, - "atproto": { - "handle": None, - "password": None, - "host": "https://bsky.social" - }, - "default_provider": "ollama" - } - self._save_config() - - def _save_config(self): - """Save configuration to file""" - try: - with open(self.config_file, 'w', encoding='utf-8') as f: - json.dump(self._config, f, indent=2) - except Exception as e: - self.logger.error(f"Failed to save config: {e}") - - def get(self, key: str, default: Any = None) -> Any: - """Get configuration value using dot notation""" - keys = key.split('.') - value = self._config - - for k in keys: - if isinstance(value, dict) and k in value: - value = value[k] - else: - return default - - return value - - def set(self, key: str, value: Any): - """Set configuration value using dot notation""" - keys = key.split('.') - config = self._config - - # Navigate to the parent dictionary - for k in keys[:-1]: - if k not in config: - config[k] = {} - config = config[k] - - # Set the value - config[keys[-1]] = value - self._save_config() - - def delete(self, key: str) -> bool: - """Delete configuration value""" - keys = key.split('.') - config = self._config - - # Navigate to the parent dictionary - for k in keys[:-1]: - if k not in config: - return False - config = config[k] - - # Delete the key if it exists - if keys[-1] in config: - del config[keys[-1]] - self._save_config() - return True - - return False - - def list_keys(self, prefix: str = "") -> list[str]: - """List all configuration keys with optional prefix""" - def _get_keys(config: dict, current_prefix: str = "") -> list[str]: - keys = [] - for k, v in config.items(): - full_key = f"{current_prefix}.{k}" if current_prefix else k - if isinstance(v, dict): - keys.extend(_get_keys(v, full_key)) - else: - keys.append(full_key) - return keys - - all_keys = _get_keys(self._config) - - if prefix: - return [k for k in all_keys if k.startswith(prefix)] - return all_keys - - def get_api_key(self, provider: str) -> Optional[str]: - """Get API key for a specific provider""" - key = self.get(f"providers.{provider}.api_key") - - # Also check environment variables - if not key and provider == "openai": - key = os.getenv("OPENAI_API_KEY") - - return key - - def get_provider_config(self, provider: str) -> Dict[str, Any]: - """Get complete configuration for a provider""" - return self.get(f"providers.{provider}", {}) \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/__init__.py b/python_backup/src/aigpt/docs/__init__.py deleted file mode 100644 index 8e4a8aa..0000000 --- a/python_backup/src/aigpt/docs/__init__.py 
+++ /dev/null @@ -1 +0,0 @@ -"""Documentation management module for ai.gpt.""" \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/config.py b/python_backup/src/aigpt/docs/config.py deleted file mode 100644 index 77e7040..0000000 --- a/python_backup/src/aigpt/docs/config.py +++ /dev/null @@ -1,150 +0,0 @@ -"""Configuration management for documentation system.""" - -import json -from pathlib import Path -from typing import Any, Dict, List, Optional, Union - -from pydantic import BaseModel, Field - - -class GitConfig(BaseModel): - """Git configuration.""" - host: str = "git.syui.ai" - protocol: str = "ssh" - - -class AtprotoConfig(BaseModel): - """Atproto configuration.""" - host: str = "syu.is" - protocol: str = "at" - at_url: str = "at://ai.syu.is" - did: str = "did:plc:6qyecktefllvenje24fcxnie" - web: str = "https://web.syu.is/@ai" - - -class ProjectMetadata(BaseModel): - """Project metadata.""" - last_updated: str - structure_version: str - domain: List[str] - git: GitConfig - atproto: AtprotoConfig - - -class ProjectInfo(BaseModel): - """Individual project information.""" - type: Union[str, List[str]] # Support both string and list - text: str - status: str - branch: str = "main" - git_url: Optional[str] = None - detailed_specs: Optional[str] = None - data_reference: Optional[str] = None - features: Optional[str] = None - - -class AIConfig(BaseModel): - """AI projects configuration.""" - ai: ProjectInfo - gpt: ProjectInfo - os: ProjectInfo - game: ProjectInfo - bot: ProjectInfo - moji: ProjectInfo - card: ProjectInfo - api: ProjectInfo - log: ProjectInfo - verse: ProjectInfo - shell: ProjectInfo - - -class DocsConfig(BaseModel): - """Main documentation configuration model.""" - version: int = 2 - metadata: ProjectMetadata - ai: AIConfig - data: Dict[str, Any] = Field(default_factory=dict) - deprecated: Dict[str, Any] = Field(default_factory=dict) - - @classmethod - def load_from_file(cls, config_path: Path) -> "DocsConfig": - """Load configuration from ai.json file.""" - if not config_path.exists(): - raise FileNotFoundError(f"Configuration file not found: {config_path}") - - with open(config_path, "r", encoding="utf-8") as f: - data = json.load(f) - - return cls(**data) - - def get_project_info(self, project_name: str) -> Optional[ProjectInfo]: - """Get project information by name.""" - return getattr(self.ai, project_name, None) - - def get_project_git_url(self, project_name: str) -> str: - """Get git URL for project.""" - project = self.get_project_info(project_name) - if project and project.git_url: - return project.git_url - - # Construct URL from metadata - host = self.metadata.git.host - protocol = self.metadata.git.protocol - - if protocol == "ssh": - return f"git@{host}:ai/{project_name}" - else: - return f"https://{host}/ai/{project_name}" - - def get_project_branch(self, project_name: str) -> str: - """Get branch for project.""" - project = self.get_project_info(project_name) - return project.branch if project else "main" - - def list_projects(self) -> List[str]: - """List all available projects.""" - return list(self.ai.__fields__.keys()) - - -def get_ai_root(custom_dir: Optional[Path] = None) -> Path: - """Get AI ecosystem root directory. - - Priority order: - 1. --dir option (custom_dir parameter) - 2. AI_DOCS_DIR environment variable - 3. ai.gpt config file (docs.ai_root) - 4. 
Default relative path - """ - if custom_dir: - return custom_dir - - # Check environment variable - import os - env_dir = os.getenv("AI_DOCS_DIR") - if env_dir: - return Path(env_dir) - - # Check ai.gpt config file - try: - from ..config import Config - config = Config() - config_ai_root = config.get("docs.ai_root") - if config_ai_root: - return Path(config_ai_root).expanduser() - except Exception: - # If config loading fails, continue to default - pass - - # Default: From gpt/src/aigpt/docs/config.py, go up to ai/ root - return Path(__file__).parent.parent.parent.parent.parent - - -def get_claude_root(custom_dir: Optional[Path] = None) -> Path: - """Get Claude documentation root directory.""" - return get_ai_root(custom_dir) / "claude" - - -def load_docs_config(custom_dir: Optional[Path] = None) -> DocsConfig: - """Load documentation configuration.""" - config_path = get_ai_root(custom_dir) / "ai.json" - return DocsConfig.load_from_file(config_path) \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/git_utils.py b/python_backup/src/aigpt/docs/git_utils.py deleted file mode 100644 index d4fc911..0000000 --- a/python_backup/src/aigpt/docs/git_utils.py +++ /dev/null @@ -1,397 +0,0 @@ -"""Git utilities for documentation management.""" - -import subprocess -from pathlib import Path -from typing import List, Optional, Tuple - -from rich.console import Console -from rich.progress import track - -from .utils import run_command - -console = Console() - - -def check_git_repository(path: Path) -> bool: - """Check if path is a git repository.""" - return (path / ".git").exists() - - -def get_submodules_status(repo_path: Path) -> List[dict]: - """Get status of all submodules.""" - if not check_git_repository(repo_path): - return [] - - returncode, stdout, stderr = run_command( - ["git", "submodule", "status"], - cwd=repo_path - ) - - if returncode != 0: - return [] - - submodules = [] - for line in stdout.strip().splitlines(): - if line.strip(): - # Parse git submodule status output - # Format: " commit_hash path (tag)" or "-commit_hash path" (not initialized) - parts = line.strip().split() - if len(parts) >= 2: - status_char = line[0] if line else ' ' - commit = parts[0].lstrip('-+ ') - path = parts[1] - - submodules.append({ - "path": path, - "commit": commit, - "initialized": status_char != '-', - "modified": status_char == '+', - "status": status_char - }) - - return submodules - - -def init_and_update_submodules(repo_path: Path, specific_paths: Optional[List[str]] = None) -> Tuple[bool, str]: - """Initialize and update submodules.""" - if not check_git_repository(repo_path): - return False, "Not a git repository" - - try: - # Initialize submodules - console.print("[blue]🔧 Initializing submodules...[/blue]") - returncode, stdout, stderr = run_command( - ["git", "submodule", "init"], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to initialize submodules: {stderr}" - - # Update submodules - console.print("[blue]📦 Updating submodules...[/blue]") - - if specific_paths: - # Update specific submodules - for path in specific_paths: - console.print(f"[dim]Updating {path}...[/dim]") - returncode, stdout, stderr = run_command( - ["git", "submodule", "update", "--init", "--recursive", path], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to update submodule {path}: {stderr}" - else: - # Update all submodules - returncode, stdout, stderr = run_command( - ["git", "submodule", "update", "--init", "--recursive"], - cwd=repo_path - ) - - if 
returncode != 0: - return False, f"Failed to update submodules: {stderr}" - - console.print("[green]✅ Submodules updated successfully[/green]") - return True, "Submodules updated successfully" - - except Exception as e: - return False, f"Error updating submodules: {str(e)}" - - -def clone_missing_submodules(repo_path: Path, ai_config) -> Tuple[bool, List[str]]: - """Clone missing submodules based on ai.json configuration.""" - if not check_git_repository(repo_path): - return False, ["Not a git repository"] - - try: - # Get current submodules - current_submodules = get_submodules_status(repo_path) - current_paths = {sub["path"] for sub in current_submodules} - - # Get expected projects from ai.json - expected_projects = ai_config.list_projects() - - # Find missing submodules - missing_submodules = [] - for project in expected_projects: - if project not in current_paths: - # Check if directory exists but is not a submodule - project_path = repo_path / project - if not project_path.exists(): - missing_submodules.append(project) - - if not missing_submodules: - console.print("[green]✅ All submodules are present[/green]") - return True, [] - - console.print(f"[yellow]📋 Found {len(missing_submodules)} missing submodules: {missing_submodules}[/yellow]") - - # Clone missing submodules - cloned = [] - for project in track(missing_submodules, description="Cloning missing submodules..."): - git_url = ai_config.get_project_git_url(project) - branch = ai_config.get_project_branch(project) - - console.print(f"[blue]📦 Adding submodule: {project}[/blue]") - console.print(f"[dim]URL: {git_url}[/dim]") - console.print(f"[dim]Branch: {branch}[/dim]") - - returncode, stdout, stderr = run_command( - ["git", "submodule", "add", "-b", branch, git_url, project], - cwd=repo_path - ) - - if returncode == 0: - cloned.append(project) - console.print(f"[green]✅ Added {project}[/green]") - else: - console.print(f"[red]❌ Failed to add {project}: {stderr}[/red]") - - if cloned: - console.print(f"[green]🎉 Successfully cloned {len(cloned)} submodules[/green]") - - return True, cloned - - except Exception as e: - return False, [f"Error cloning submodules: {str(e)}"] - - -def ensure_submodules_available(repo_path: Path, ai_config, auto_clone: bool = True) -> Tuple[bool, List[str]]: - """Ensure all submodules are available, optionally cloning missing ones.""" - console.print("[blue]🔍 Checking submodule status...[/blue]") - - # Get current submodule status - submodules = get_submodules_status(repo_path) - - # Check for uninitialized submodules - uninitialized = [sub for sub in submodules if not sub["initialized"]] - - if uninitialized: - console.print(f"[yellow]📦 Found {len(uninitialized)} uninitialized submodules[/yellow]") - if auto_clone: - success, message = init_and_update_submodules( - repo_path, - [sub["path"] for sub in uninitialized] - ) - if not success: - return False, [message] - else: - return False, [f"Uninitialized submodules: {[sub['path'] for sub in uninitialized]}"] - - # Check for missing submodules (not in .gitmodules but expected) - if auto_clone: - success, cloned = clone_missing_submodules(repo_path, ai_config) - if not success: - return False, cloned - - # If we cloned new submodules, update all to be safe - if cloned: - success, message = init_and_update_submodules(repo_path) - if not success: - return False, [message] - - return True, [] - - -def get_git_branch(repo_path: Path) -> Optional[str]: - """Get current git branch.""" - if not check_git_repository(repo_path): - return None - - returncode, 
stdout, stderr = run_command( - ["git", "branch", "--show-current"], - cwd=repo_path - ) - - if returncode == 0: - return stdout.strip() - return None - - -def get_git_remote_url(repo_path: Path, remote: str = "origin") -> Optional[str]: - """Get git remote URL.""" - if not check_git_repository(repo_path): - return None - - returncode, stdout, stderr = run_command( - ["git", "remote", "get-url", remote], - cwd=repo_path - ) - - if returncode == 0: - return stdout.strip() - return None - - -def pull_repository(repo_path: Path, branch: Optional[str] = None) -> Tuple[bool, str]: - """Pull latest changes from remote repository.""" - if not check_git_repository(repo_path): - return False, "Not a git repository" - - try: - # Get current branch if not specified - if branch is None: - branch = get_git_branch(repo_path) - if not branch: - # If in detached HEAD state, try to switch to main - console.print("[yellow]⚠️ Repository in detached HEAD state, switching to main...[/yellow]") - returncode, stdout, stderr = run_command( - ["git", "checkout", "main"], - cwd=repo_path - ) - if returncode == 0: - branch = "main" - console.print("[green]✅ Switched to main branch[/green]") - else: - return False, f"Could not switch to main branch: {stderr}" - - console.print(f"[blue]📥 Pulling latest changes for branch: {branch}[/blue]") - - # Check if we have uncommitted changes - returncode, stdout, stderr = run_command( - ["git", "status", "--porcelain"], - cwd=repo_path - ) - - if returncode == 0 and stdout.strip(): - console.print("[yellow]⚠️ Repository has uncommitted changes[/yellow]") - console.print("[dim]Consider committing changes before pull[/dim]") - # Continue anyway, git will handle conflicts - - # Fetch latest changes - console.print("[dim]Fetching from remote...[/dim]") - returncode, stdout, stderr = run_command( - ["git", "fetch", "origin"], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to fetch: {stderr}" - - # Pull changes - returncode, stdout, stderr = run_command( - ["git", "pull", "origin", branch], - cwd=repo_path - ) - - if returncode != 0: - # Check if it's a merge conflict - if "CONFLICT" in stderr or "conflict" in stderr.lower(): - return False, f"Merge conflicts detected: {stderr}" - return False, f"Failed to pull: {stderr}" - - # Check if there were any changes - if "Already up to date" in stdout or "Already up-to-date" in stdout: - console.print("[green]✅ Repository already up to date[/green]") - else: - console.print("[green]✅ Successfully pulled latest changes[/green]") - if stdout.strip(): - console.print(f"[dim]{stdout.strip()}[/dim]") - - return True, "Successfully pulled latest changes" - - except Exception as e: - return False, f"Error pulling repository: {str(e)}" - - -def pull_wiki_repository(wiki_path: Path) -> Tuple[bool, str]: - """Pull latest changes from wiki repository before generating content.""" - if not wiki_path.exists(): - return False, f"Wiki directory not found: {wiki_path}" - - if not check_git_repository(wiki_path): - return False, f"Wiki directory is not a git repository: {wiki_path}" - - console.print(f"[blue]📚 Updating wiki repository: {wiki_path.name}[/blue]") - - return pull_repository(wiki_path) - - -def push_repository(repo_path: Path, branch: Optional[str] = None, commit_message: Optional[str] = None) -> Tuple[bool, str]: - """Commit and push changes to remote repository.""" - if not check_git_repository(repo_path): - return False, "Not a git repository" - - try: - # Get current branch if not specified - if branch is None: - 
branch = get_git_branch(repo_path) - if not branch: - return False, "Could not determine current branch" - - # Check if we have any changes to commit - returncode, stdout, stderr = run_command( - ["git", "status", "--porcelain"], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to check git status: {stderr}" - - if not stdout.strip(): - console.print("[green]✅ No changes to commit[/green]") - return True, "No changes to commit" - - console.print(f"[blue]📝 Committing changes in: {repo_path.name}[/blue]") - - # Add all changes - returncode, stdout, stderr = run_command( - ["git", "add", "."], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to add changes: {stderr}" - - # Commit changes - if commit_message is None: - commit_message = f"Update wiki content - {Path().cwd().name} documentation sync" - - returncode, stdout, stderr = run_command( - ["git", "commit", "-m", commit_message], - cwd=repo_path - ) - - if returncode != 0: - # Check if there were no changes to commit - if "nothing to commit" in stderr or "nothing added to commit" in stderr: - console.print("[green]✅ No changes to commit[/green]") - return True, "No changes to commit" - return False, f"Failed to commit changes: {stderr}" - - console.print(f"[blue]📤 Pushing to remote branch: {branch}[/blue]") - - # Push to remote - returncode, stdout, stderr = run_command( - ["git", "push", "origin", branch], - cwd=repo_path - ) - - if returncode != 0: - return False, f"Failed to push: {stderr}" - - console.print("[green]✅ Successfully pushed changes to remote[/green]") - if stdout.strip(): - console.print(f"[dim]{stdout.strip()}[/dim]") - - return True, "Successfully committed and pushed changes" - - except Exception as e: - return False, f"Error pushing repository: {str(e)}" - - -def push_wiki_repository(wiki_path: Path, commit_message: Optional[str] = None) -> Tuple[bool, str]: - """Commit and push changes to wiki repository after generating content.""" - if not wiki_path.exists(): - return False, f"Wiki directory not found: {wiki_path}" - - if not check_git_repository(wiki_path): - return False, f"Wiki directory is not a git repository: {wiki_path}" - - console.print(f"[blue]📚 Pushing wiki repository: {wiki_path.name}[/blue]") - - if commit_message is None: - commit_message = "Auto-update wiki content from ai.gpt docs" - - return push_repository(wiki_path, branch="main", commit_message=commit_message) \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/templates.py b/python_backup/src/aigpt/docs/templates.py deleted file mode 100644 index 7e6468a..0000000 --- a/python_backup/src/aigpt/docs/templates.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Template management for documentation generation.""" - -from datetime import datetime -from pathlib import Path -from typing import Dict, List, Optional - -from jinja2 import Environment, FileSystemLoader - -from .config import DocsConfig, get_claude_root - - -class DocumentationTemplateManager: - """Manages Jinja2 templates for documentation generation.""" - - def __init__(self, config: DocsConfig): - self.config = config - self.claude_root = get_claude_root() - self.templates_dir = self.claude_root / "templates" - self.core_dir = self.claude_root / "core" - self.projects_dir = self.claude_root / "projects" - - # Setup Jinja2 environment - self.env = Environment( - loader=FileSystemLoader([ - str(self.templates_dir), - str(self.core_dir), - str(self.projects_dir), - ]), - trim_blocks=True, - lstrip_blocks=True, - ) - - # Add custom filters - 
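A minimal sketch of the Jinja2 wiring used by `DocumentationTemplateManager` below: one `Environment` spanning several template roots, plus a custom `timestamp` filter. Note that, as registered in the deleted code, the piped value becomes the format string. Directory names here are assumptions, not the real layout:

```python
# Minimal sketch; directory names are assumptions, not the real layout.
from datetime import datetime
from jinja2 import Environment, FileSystemLoader

env = Environment(
    loader=FileSystemLoader(["claude/templates", "claude/core", "claude/projects"]),
    trim_blocks=True,    # drop the newline after a block tag
    lstrip_blocks=True,  # strip leading whitespace before a block tag
)

# Registered like self._timestamp_filter: the piped value is the format string.
env.filters["timestamp"] = lambda fmt="%Y-%m-%d %H:%M:%S": datetime.now().strftime(fmt)

# In a template: {{ "%Y-%m-%d" | timestamp }}
```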
self.env.filters["timestamp"] = self._timestamp_filter - - def _timestamp_filter(self, format_str: str = "%Y-%m-%d %H:%M:%S") -> str: - """Jinja2 filter for timestamps.""" - return datetime.now().strftime(format_str) - - def get_template_context(self, project_name: str, components: List[str]) -> Dict: - """Get template context for documentation generation.""" - project_info = self.config.get_project_info(project_name) - - return { - "config": self.config, - "project_name": project_name, - "project_info": project_info, - "components": components, - "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - "ai_md_content": self._get_ai_md_content(), - } - - def _get_ai_md_content(self) -> Optional[str]: - """Get content from ai.md file.""" - ai_md_path = self.claude_root.parent / "ai.md" - if ai_md_path.exists(): - return ai_md_path.read_text(encoding="utf-8") - return None - - def render_component(self, component_name: str, context: Dict) -> str: - """Render a specific component.""" - component_files = { - "core": ["philosophy.md", "naming.md", "architecture.md"], - "philosophy": ["philosophy.md"], - "naming": ["naming.md"], - "architecture": ["architecture.md"], - "specific": [f"{context['project_name']}.md"], - } - - if component_name not in component_files: - raise ValueError(f"Unknown component: {component_name}") - - content_parts = [] - - for file_name in component_files[component_name]: - file_path = self.core_dir / file_name - if component_name == "specific": - file_path = self.projects_dir / file_name - - if file_path.exists(): - content = file_path.read_text(encoding="utf-8") - content_parts.append(content) - - return "\n\n".join(content_parts) - - def generate_documentation( - self, - project_name: str, - components: List[str], - output_path: Optional[Path] = None, - ) -> str: - """Generate complete documentation.""" - context = self.get_template_context(project_name, components) - - # Build content sections - content_sections = [] - - # Add ai.md header if available - if context["ai_md_content"]: - content_sections.append(context["ai_md_content"]) - content_sections.append("---\n") - - # Add title and metadata - content_sections.append("# エコシステム統合設計書(詳細版)\n") - content_sections.append("このドキュメントは動的生成されました。修正は元ファイルで行ってください。\n") - content_sections.append(f"生成日時: {context['timestamp']}") - content_sections.append(f"対象プロジェクト: {project_name}") - content_sections.append(f"含有コンポーネント: {','.join(components)}\n") - - # Add component content - for component in components: - try: - component_content = self.render_component(component, context) - if component_content.strip(): - content_sections.append(component_content) - except ValueError as e: - print(f"Warning: {e}") - - # Add footer - footer = """ -# footer - -© syui - -# important-instruction-reminders -Do what has been asked; nothing more, nothing less. -NEVER create files unless they're absolutely necessary for achieving your goal. -ALWAYS prefer editing an existing file to creating a new one. -NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User. 
-""" - content_sections.append(footer) - - # Join all sections - final_content = "\n".join(content_sections) - - # Write to file if output path provided - if output_path: - output_path.parent.mkdir(parents=True, exist_ok=True) - output_path.write_text(final_content, encoding="utf-8") - - return final_content - - def list_available_components(self) -> List[str]: - """List available components.""" - return ["core", "philosophy", "naming", "architecture", "specific"] - - def validate_components(self, components: List[str]) -> List[str]: - """Validate and return valid components.""" - available = self.list_available_components() - valid_components = [] - - for component in components: - if component in available: - valid_components.append(component) - else: - print(f"Warning: Unknown component '{component}' (available: {available})") - - return valid_components or ["core", "specific"] # Default fallback \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/utils.py b/python_backup/src/aigpt/docs/utils.py deleted file mode 100644 index efac1d4..0000000 --- a/python_backup/src/aigpt/docs/utils.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Utility functions for documentation management.""" - -import subprocess -import sys -from pathlib import Path -from typing import List, Optional, Tuple - -from rich.console import Console -from rich.progress import Progress, SpinnerColumn, TextColumn - -console = Console() - - -def run_command( - cmd: List[str], - cwd: Optional[Path] = None, - capture_output: bool = True, - verbose: bool = False, -) -> Tuple[int, str, str]: - """Run a command and return exit code, stdout, stderr.""" - if verbose: - console.print(f"[dim]Running: {' '.join(cmd)}[/dim]") - - try: - result = subprocess.run( - cmd, - cwd=cwd, - capture_output=capture_output, - text=True, - check=False, - ) - return result.returncode, result.stdout, result.stderr - except FileNotFoundError: - return 1, "", f"Command not found: {cmd[0]}" - - -def is_git_repository(path: Path) -> bool: - """Check if path is a git repository.""" - return (path / ".git").exists() - - -def get_git_status(repo_path: Path) -> Tuple[bool, List[str]]: - """Get git status for repository.""" - if not is_git_repository(repo_path): - return False, ["Not a git repository"] - - returncode, stdout, stderr = run_command( - ["git", "status", "--porcelain"], - cwd=repo_path - ) - - if returncode != 0: - return False, [stderr.strip()] - - changes = [line.strip() for line in stdout.splitlines() if line.strip()] - return len(changes) == 0, changes - - -def validate_project_name(project_name: str, available_projects: List[str]) -> bool: - """Validate project name against available projects.""" - return project_name in available_projects - - -def format_file_size(size_bytes: int) -> str: - """Format file size in human readable format.""" - for unit in ['B', 'KB', 'MB', 'GB']: - if size_bytes < 1024.0: - return f"{size_bytes:.1f}{unit}" - size_bytes /= 1024.0 - return f"{size_bytes:.1f}TB" - - -def count_lines(file_path: Path) -> int: - """Count lines in a file.""" - try: - with open(file_path, 'r', encoding='utf-8') as f: - return sum(1 for _ in f) - except (OSError, UnicodeDecodeError): - return 0 - - -def find_project_directories(base_path: Path, projects: List[str]) -> dict: - """Find project directories relative to base path.""" - project_dirs = {} - - # Look for directories matching project names - for project in projects: - project_path = base_path / project - if project_path.exists() and project_path.is_dir(): - 
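# Only directories whose name exactly matches a configured project are recorded.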
project_dirs[project] = project_path - - return project_dirs - - -def check_command_available(command: str) -> bool: - """Check if a command is available in PATH.""" - try: - subprocess.run([command, "--version"], - capture_output=True, - check=True) - return True - except (subprocess.CalledProcessError, FileNotFoundError): - return False - - -def get_platform_info() -> dict: - """Get platform information.""" - import platform - - return { - "system": platform.system(), - "release": platform.release(), - "machine": platform.machine(), - "python_version": platform.python_version(), - "python_implementation": platform.python_implementation(), - } - - -class ProgressManager: - """Context manager for rich progress bars.""" - - def __init__(self, description: str = "Processing..."): - self.description = description - self.progress = None - self.task = None - - def __enter__(self): - self.progress = Progress( - SpinnerColumn(), - TextColumn("[progress.description]{task.description}"), - console=console, - ) - self.progress.start() - self.task = self.progress.add_task(self.description, total=None) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.progress: - self.progress.stop() - - def update(self, description: str): - """Update progress description.""" - if self.progress and self.task is not None: - self.progress.update(self.task, description=description) - - -def safe_write_file(file_path: Path, content: str, backup: bool = True) -> bool: - """Safely write content to file with optional backup.""" - try: - # Create backup if file exists and backup requested - if backup and file_path.exists(): - backup_path = file_path.with_suffix(file_path.suffix + ".bak") - backup_path.write_text(file_path.read_text(), encoding="utf-8") - - # Ensure parent directory exists - file_path.parent.mkdir(parents=True, exist_ok=True) - - # Write content - file_path.write_text(content, encoding="utf-8") - return True - - except (OSError, UnicodeError) as e: - console.print(f"[red]Error writing file {file_path}: {e}[/red]") - return False - - -def confirm_action(message: str, default: bool = False) -> bool: - """Ask user for confirmation.""" - if not sys.stdin.isatty(): - return default - - suffix = " [Y/n]: " if default else " [y/N]: " - response = input(message + suffix).strip().lower() - - if not response: - return default - - return response in ('y', 'yes', 'true', '1') \ No newline at end of file diff --git a/python_backup/src/aigpt/docs/wiki_generator.py b/python_backup/src/aigpt/docs/wiki_generator.py deleted file mode 100644 index da25c7a..0000000 --- a/python_backup/src/aigpt/docs/wiki_generator.py +++ /dev/null @@ -1,314 +0,0 @@ -"""Wiki generation utilities for ai.wiki management.""" - -import re -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -from rich.console import Console - -from .config import DocsConfig, get_ai_root -from .utils import find_project_directories -from .git_utils import pull_wiki_repository, push_wiki_repository - -console = Console() - - -class WikiGenerator: - """Generates wiki content from project documentation.""" - - def __init__(self, config: DocsConfig, ai_root: Path): - self.config = config - self.ai_root = ai_root - self.wiki_root = ai_root / "ai.wiki" if (ai_root / "ai.wiki").exists() else None - - def extract_project_summary(self, project_md_path: Path) -> Dict[str, str]: - """Extract key information from claude/projects/${repo}.md file.""" - if not project_md_path.exists(): - return {"title": "No documentation", "summary": 
"Project documentation not found", "status": "Unknown"} - - try: - content = project_md_path.read_text(encoding="utf-8") - - # Extract title (first # heading) - title_match = re.search(r'^# (.+)$', content, re.MULTILINE) - title = title_match.group(1) if title_match else "Unknown Project" - - # Extract project overview/summary (look for specific patterns) - summary = self._extract_summary_section(content) - - # Extract status information - status = self._extract_status_info(content) - - # Extract key features/goals - features = self._extract_features(content) - - return { - "title": title, - "summary": summary, - "status": status, - "features": features, - "last_updated": self._get_last_updated_info(content) - } - - except Exception as e: - console.print(f"[yellow]Warning: Failed to parse {project_md_path}: {e}[/yellow]") - return {"title": "Parse Error", "summary": str(e), "status": "Error"} - - def _extract_summary_section(self, content: str) -> str: - """Extract summary or overview section.""" - # Look for common summary patterns - patterns = [ - r'## 概要\s*\n(.*?)(?=\n##|\n#|\Z)', - r'## Overview\s*\n(.*?)(?=\n##|\n#|\Z)', - r'## プロジェクト概要\s*\n(.*?)(?=\n##|\n#|\Z)', - r'\*\*目的\*\*: (.+?)(?=\n|$)', - r'\*\*中核概念\*\*:\s*\n(.*?)(?=\n##|\n#|\Z)', - ] - - for pattern in patterns: - match = re.search(pattern, content, re.DOTALL | re.MULTILINE) - if match: - summary = match.group(1).strip() - # Clean up and truncate - summary = re.sub(r'\n+', ' ', summary) - summary = re.sub(r'\s+', ' ', summary) - return summary[:300] + "..." if len(summary) > 300 else summary - - # Fallback: first paragraph after title - lines = content.split('\n') - summary_lines = [] - found_content = False - - for line in lines: - line = line.strip() - if not line: - if found_content and summary_lines: - break - continue - if line.startswith('#'): - found_content = True - continue - if found_content and not line.startswith('*') and not line.startswith('-'): - summary_lines.append(line) - if len(' '.join(summary_lines)) > 200: - break - - return ' '.join(summary_lines)[:300] + "..." 
if summary_lines else "No summary available" - - def _extract_status_info(self, content: str) -> str: - """Extract status information.""" - # Look for status patterns - patterns = [ - r'\*\*状況\*\*: (.+?)(?=\n|$)', - r'\*\*Status\*\*: (.+?)(?=\n|$)', - r'\*\*現在の状況\*\*: (.+?)(?=\n|$)', - r'- \*\*状況\*\*: (.+?)(?=\n|$)', - ] - - for pattern in patterns: - match = re.search(pattern, content) - if match: - return match.group(1).strip() - - return "No status information" - - def _extract_features(self, content: str) -> List[str]: - """Extract key features or bullet points.""" - features = [] - - # Look for bullet point lists - lines = content.split('\n') - in_list = False - - for line in lines: - line = line.strip() - if line.startswith('- ') or line.startswith('* '): - feature = line[2:].strip() - if len(feature) > 10 and not feature.startswith('**'): # Skip metadata - features.append(feature) - in_list = True - if len(features) >= 5: # Limit to 5 features - break - elif in_list and not line: - break - - return features - - def _get_last_updated_info(self, content: str) -> str: - """Extract last updated information.""" - patterns = [ - r'生成日時: (.+?)(?=\n|$)', - r'最終更新: (.+?)(?=\n|$)', - r'Last updated: (.+?)(?=\n|$)', - ] - - for pattern in patterns: - match = re.search(pattern, content) - if match: - return match.group(1).strip() - - return "Unknown" - - def generate_project_wiki_page(self, project_name: str, project_info: Dict[str, str]) -> str: - """Generate wiki page for a single project.""" - config_info = self.config.get_project_info(project_name) - - content = f"""# {project_name} - -## 概要 -{project_info['summary']} - -## プロジェクト情報 -- **タイプ**: {config_info.type if config_info else 'Unknown'} -- **説明**: {config_info.text if config_info else 'No description'} -- **ステータス**: {config_info.status if config_info else project_info.get('status', 'Unknown')} -- **ブランチ**: {config_info.branch if config_info else 'main'} -- **最終更新**: {project_info.get('last_updated', 'Unknown')} - -## 主な機能・特徴 -""" - - features = project_info.get('features', []) - if features: - for feature in features: - content += f"- {feature}\n" - else: - content += "- 情報なし\n" - - content += f""" -## リンク -- **Repository**: https://git.syui.ai/ai/{project_name} -- **Project Documentation**: [claude/projects/{project_name}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{project_name}.md) -- **Generated Documentation**: [{project_name}/claude.md](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md) - ---- -*このページは claude/projects/{project_name}.md から自動生成されました* -""" - - return content - - def generate_wiki_home_page(self, project_summaries: Dict[str, Dict[str, str]]) -> str: - """Generate the main Home.md page with all project summaries.""" - content = """# AI Ecosystem Wiki - -AI生態系プロジェクトの概要とドキュメント集約ページです。 - -## プロジェクト一覧 - -""" - - # Group projects by type - project_groups = {} - for project_name, info in project_summaries.items(): - config_info = self.config.get_project_info(project_name) - project_type = config_info.type if config_info else 'other' - if isinstance(project_type, list): - project_type = project_type[0] # Use first type - - if project_type not in project_groups: - project_groups[project_type] = [] - project_groups[project_type].append((project_name, info)) - - # Generate sections by type - type_names = { - 'ai': '🧠 AI・知能システム', - 'gpt': '🤖 自律・対話システム', - 'os': '💻 システム・基盤', - 'card': '🎮 ゲーム・エンターテイメント', - 'shell': '⚡ ツール・ユーティリティ', - 'other': '📦 その他' - } - - for project_type, projects in 
project_groups.items(): - type_display = type_names.get(project_type, f'📁 {project_type}') - content += f"### {type_display}\n\n" - - for project_name, info in projects: - content += f"#### [{project_name}](auto/{project_name}.md)\n" - content += f"{info['summary'][:150]}{'...' if len(info['summary']) > 150 else ''}\n\n" - - # Add quick status - config_info = self.config.get_project_info(project_name) - if config_info: - content += f"**Status**: {config_info.status} \n" - content += f"**Links**: [Repo](https://git.syui.ai/ai/{project_name}) | [Docs](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md)\n\n" - - content += """ ---- - -## ディレクトリ構成 - -- `auto/` - 自動生成されたプロジェクト概要 -- `claude/` - Claude Code作業記録 -- `manual/` - 手動作成ドキュメント - ---- - -*このページは ai.json と claude/projects/ から自動生成されました* -*最終更新: {last_updated}* -""".format(last_updated=self._get_current_timestamp()) - - return content - - def _get_current_timestamp(self) -> str: - """Get current timestamp.""" - from datetime import datetime - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - def update_wiki_auto_directory(self, auto_pull: bool = True) -> Tuple[bool, List[str]]: - """Update the auto/ directory with project summaries.""" - if not self.wiki_root: - return False, ["ai.wiki directory not found"] - - # Pull latest changes from wiki repository first - if auto_pull: - success, message = pull_wiki_repository(self.wiki_root) - if not success: - console.print(f"[yellow]⚠️ Wiki pull failed: {message}[/yellow]") - console.print("[dim]Continuing with local wiki update...[/dim]") - else: - console.print(f"[green]✅ Wiki repository updated[/green]") - - auto_dir = self.wiki_root / "auto" - auto_dir.mkdir(exist_ok=True) - - # Get claude/projects directory - claude_projects_dir = self.ai_root / "claude" / "projects" - if not claude_projects_dir.exists(): - return False, [f"claude/projects directory not found: {claude_projects_dir}"] - - project_summaries = {} - updated_files = [] - - console.print("[blue]📋 Extracting project summaries from claude/projects/...[/blue]") - - # Process all projects from ai.json - for project_name in self.config.list_projects(): - project_md_path = claude_projects_dir / f"{project_name}.md" - - # Extract summary from claude/projects/${project}.md - project_info = self.extract_project_summary(project_md_path) - project_summaries[project_name] = project_info - - # Generate individual project wiki page - wiki_content = self.generate_project_wiki_page(project_name, project_info) - wiki_file_path = auto_dir / f"{project_name}.md" - - try: - wiki_file_path.write_text(wiki_content, encoding="utf-8") - updated_files.append(f"auto/{project_name}.md") - console.print(f"[green]✓ Generated auto/{project_name}.md[/green]") - except Exception as e: - console.print(f"[red]✗ Failed to write auto/{project_name}.md: {e}[/red]") - - # Generate Home.md - try: - home_content = self.generate_wiki_home_page(project_summaries) - home_path = self.wiki_root / "Home.md" - home_path.write_text(home_content, encoding="utf-8") - updated_files.append("Home.md") - console.print(f"[green]✓ Generated Home.md[/green]") - except Exception as e: - console.print(f"[red]✗ Failed to write Home.md: {e}[/red]") - - return True, updated_files \ No newline at end of file diff --git a/python_backup/src/aigpt/fortune.py b/python_backup/src/aigpt/fortune.py deleted file mode 100644 index 0bb1e40..0000000 --- a/python_backup/src/aigpt/fortune.py +++ /dev/null @@ -1,118 +0,0 @@ -"""AI Fortune system for daily personality variations""" - 
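# A minimal usage sketch, assuming the default data directory used elsewhere
# in this project; Persona drives this module the same way:
#
#   from pathlib import Path
#   fs = FortuneSystem(Path.home() / ".config" / "syui" / "ai" / "gpt" / "data")
#   fortune = fs.get_today_fortune()        # drawn once per day, then cached
#   modifiers = fs.get_personality_modifier(fortune)
#   print(f"fortune {fortune.fortune_value}/10, optimism {modifiers['optimism']:.1f}")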
-import json -import random -from datetime import date, datetime, timedelta -from pathlib import Path -from typing import Optional -import logging - -from .models import AIFortune - - -class FortuneSystem: - """Manages daily AI fortune affecting personality""" - - def __init__(self, data_dir: Path): - self.data_dir = data_dir - self.fortune_file = data_dir / "fortunes.json" - self.fortunes: dict[str, AIFortune] = {} - self.logger = logging.getLogger(__name__) - self._load_fortunes() - - def _load_fortunes(self): - """Load fortune history from storage""" - if self.fortune_file.exists(): - with open(self.fortune_file, 'r', encoding='utf-8') as f: - data = json.load(f) - for date_str, fortune_data in data.items(): - # Convert date string back to date object - fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date() - self.fortunes[date_str] = AIFortune(**fortune_data) - - def _save_fortunes(self): - """Save fortune history to storage""" - data = {} - for date_str, fortune in self.fortunes.items(): - fortune_dict = fortune.model_dump(mode='json') - fortune_dict['date'] = fortune.date.isoformat() - data[date_str] = fortune_dict - - with open(self.fortune_file, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2) - - def get_today_fortune(self) -> AIFortune: - """Get or generate today's fortune""" - today = date.today() - today_str = today.isoformat() - - if today_str in self.fortunes: - return self.fortunes[today_str] - - # Generate new fortune - fortune_value = random.randint(1, 10) - - # Check yesterday's fortune for consecutive tracking - yesterday = (today - timedelta(days=1)) - yesterday_str = yesterday.isoformat() - - consecutive_good = 0 - consecutive_bad = 0 - breakthrough_triggered = False - - if yesterday_str in self.fortunes: - yesterday_fortune = self.fortunes[yesterday_str] - - if fortune_value >= 7: # Good fortune - if yesterday_fortune.fortune_value >= 7: - consecutive_good = yesterday_fortune.consecutive_good + 1 - else: - consecutive_good = 1 - elif fortune_value <= 3: # Bad fortune - if yesterday_fortune.fortune_value <= 3: - consecutive_bad = yesterday_fortune.consecutive_bad + 1 - else: - consecutive_bad = 1 - - # Check breakthrough conditions - if consecutive_good >= 3: - breakthrough_triggered = True - self.logger.info("Breakthrough! 3 consecutive good fortunes!") - fortune_value = 10 # Max fortune on breakthrough - elif consecutive_bad >= 3: - breakthrough_triggered = True - self.logger.info("Breakthrough! 
3 consecutive bad fortunes!") - fortune_value = random.randint(7, 10) # Good fortune after bad streak - - fortune = AIFortune( - date=today, - fortune_value=fortune_value, - consecutive_good=consecutive_good, - consecutive_bad=consecutive_bad, - breakthrough_triggered=breakthrough_triggered - ) - - self.fortunes[today_str] = fortune - self._save_fortunes() - - self.logger.info(f"Today's fortune: {fortune_value}/10") - return fortune - - def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]: - """Get personality modifiers based on fortune""" - base_modifier = fortune.fortune_value / 10.0 - - modifiers = { - "optimism": base_modifier, - "energy": base_modifier * 0.8, - "patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1), - "creativity": 0.5 + (base_modifier * 0.5), - "empathy": 0.7 + (base_modifier * 0.3) - } - - # Breakthrough effects - if fortune.breakthrough_triggered: - modifiers["confidence"] = 1.0 - modifiers["spontaneity"] = 0.9 - - return modifiers \ No newline at end of file diff --git a/python_backup/src/aigpt/mcp_server.py b/python_backup/src/aigpt/mcp_server.py deleted file mode 100644 index 3ee9916..0000000 --- a/python_backup/src/aigpt/mcp_server.py +++ /dev/null @@ -1,1016 +0,0 @@ -"""MCP Server for ai.gpt system""" - -from typing import Optional, List, Dict, Any -from fastapi_mcp import FastApiMCP -from fastapi import FastAPI -from pathlib import Path -import logging -import subprocess -import os -import shlex -import httpx -import json -from .ai_provider import create_ai_provider - -from .persona import Persona -from .models import Memory, Relationship, PersonaState - -logger = logging.getLogger(__name__) - - -class AIGptMcpServer: - """MCP Server that exposes ai.gpt functionality to AI assistants""" - - def __init__(self, data_dir: Path): - self.data_dir = data_dir - self.persona = Persona(data_dir) - - # Create FastAPI app - self.app = FastAPI( - title="AI.GPT Memory and Relationship System", - description="MCP server for ai.gpt system" - ) - - # Create MCP server with FastAPI app - self.server = FastApiMCP(self.app) - - # Check if ai.card exists - self.card_dir = Path("./card") - self.has_card = self.card_dir.exists() and self.card_dir.is_dir() - - # Check if ai.log exists - self.log_dir = Path("./log") - self.has_log = self.log_dir.exists() and self.log_dir.is_dir() - - self._register_tools() - - # Register ai.card tools if available - if self.has_card: - self._register_card_tools() - - # Register ai.log tools if available - if self.has_log: - self._register_log_tools() - - def _register_tools(self): - """Register all MCP tools""" - - @self.app.get("/get_memories", operation_id="get_memories") - async def get_memories(user_id: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]: - """Get active memories from the AI's memory system""" - memories = self.persona.memory.get_active_memories(limit=limit) - return [ - { - "id": mem.id, - "content": mem.content, - "level": mem.level.value, - "importance": mem.importance_score, - "is_core": mem.is_core, - "timestamp": mem.timestamp.isoformat() - } - for mem in memories - ] - - @self.app.get("/get_contextual_memories", operation_id="get_contextual_memories") - async def get_contextual_memories(query: str = "", limit: int = 10) -> Dict[str, List[Dict[str, Any]]]: - """Get memories organized by priority with contextual relevance""" - memory_groups = self.persona.memory.get_contextual_memories(query=query, limit=limit) - - result = {} - for group_name, memories in memory_groups.items(): - 
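# Flatten each Memory model into a plain JSON-safe dict so MCP clients never
# see the internal pydantic types.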
result[group_name] = [ - { - "id": mem.id, - "content": mem.content, - "level": mem.level.value, - "importance": mem.importance_score, - "is_core": mem.is_core, - "timestamp": mem.timestamp.isoformat(), - "summary": mem.summary, - "metadata": mem.metadata - } - for mem in memories - ] - return result - - @self.app.post("/search_memories", operation_id="search_memories") - async def search_memories(keywords: List[str], memory_types: Optional[List[str]] = None) -> List[Dict[str, Any]]: - """Search memories by keywords and optionally filter by memory types""" - from .models import MemoryLevel - - # Convert string memory types to enum if provided - level_filter = None - if memory_types: - level_filter = [] - for mt in memory_types: - try: - level_filter.append(MemoryLevel(mt)) - except ValueError: - pass # Skip invalid memory types - - memories = self.persona.memory.search_memories(keywords, memory_types=level_filter) - return [ - { - "id": mem.id, - "content": mem.content, - "level": mem.level.value, - "importance": mem.importance_score, - "is_core": mem.is_core, - "timestamp": mem.timestamp.isoformat(), - "summary": mem.summary, - "metadata": mem.metadata - } - for mem in memories - ] - - @self.app.post("/create_summary", operation_id="create_summary") - async def create_summary(user_id: str) -> Dict[str, Any]: - """Create an AI-powered summary of recent memories""" - try: - ai_provider = create_ai_provider() - summary = self.persona.memory.create_smart_summary(user_id, ai_provider=ai_provider) - - if summary: - return { - "success": True, - "summary": { - "id": summary.id, - "content": summary.content, - "level": summary.level.value, - "importance": summary.importance_score, - "timestamp": summary.timestamp.isoformat(), - "metadata": summary.metadata - } - } - else: - return {"success": False, "reason": "Not enough memories to summarize"} - except Exception as e: - logger.error(f"Failed to create summary: {e}") - return {"success": False, "reason": str(e)} - - @self.app.post("/create_core_memory", operation_id="create_core_memory") - async def create_core_memory() -> Dict[str, Any]: - """Create a core memory by analyzing all existing memories""" - try: - ai_provider = create_ai_provider() - core_memory = self.persona.memory.create_core_memory(ai_provider=ai_provider) - - if core_memory: - return { - "success": True, - "core_memory": { - "id": core_memory.id, - "content": core_memory.content, - "level": core_memory.level.value, - "importance": core_memory.importance_score, - "timestamp": core_memory.timestamp.isoformat(), - "metadata": core_memory.metadata - } - } - else: - return {"success": False, "reason": "Not enough memories to create core memory"} - except Exception as e: - logger.error(f"Failed to create core memory: {e}") - return {"success": False, "reason": str(e)} - - @self.app.get("/get_relationship", operation_id="get_relationship") - async def get_relationship(user_id: str) -> Dict[str, Any]: - """Get relationship status with a specific user""" - rel = self.persona.relationships.get_or_create_relationship(user_id) - return { - "user_id": rel.user_id, - "status": rel.status.value, - "score": rel.score, - "transmission_enabled": rel.transmission_enabled, - "is_broken": rel.is_broken, - "total_interactions": rel.total_interactions, - "last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None - } - - @self.app.get("/get_all_relationships", operation_id="get_all_relationships") - async def get_all_relationships() -> List[Dict[str, Any]]: - """Get all 
relationships""" - relationships = [] - for user_id, rel in self.persona.relationships.relationships.items(): - relationships.append({ - "user_id": user_id, - "status": rel.status.value, - "score": rel.score, - "transmission_enabled": rel.transmission_enabled, - "is_broken": rel.is_broken - }) - return relationships - - @self.app.get("/get_persona_state", operation_id="get_persona_state") - async def get_persona_state() -> Dict[str, Any]: - """Get current persona state including fortune and mood""" - state = self.persona.get_current_state() - return { - "mood": state.current_mood, - "fortune": { - "value": state.fortune.fortune_value, - "date": state.fortune.date.isoformat(), - "breakthrough": state.fortune.breakthrough_triggered - }, - "personality": state.base_personality, - "active_memory_count": len(state.active_memories) - } - - @self.app.post("/get_context_prompt", operation_id="get_context_prompt") - async def get_context_prompt(user_id: str, message: str) -> Dict[str, Any]: - """Get context-aware prompt for AI response generation""" - try: - context_prompt = self.persona.build_context_prompt(user_id, message) - return { - "success": True, - "context_prompt": context_prompt, - "user_id": user_id, - "message": message - } - except Exception as e: - logger.error(f"Failed to build context prompt: {e}") - return {"success": False, "reason": str(e)} - - @self.app.post("/process_interaction", operation_id="process_interaction") - async def process_interaction(user_id: str, message: str) -> Dict[str, Any]: - """Process an interaction with a user""" - response, relationship_delta = self.persona.process_interaction(user_id, message) - rel = self.persona.relationships.get_or_create_relationship(user_id) - - return { - "response": response, - "relationship_delta": relationship_delta, - "new_relationship_score": rel.score, - "transmission_enabled": rel.transmission_enabled, - "relationship_status": rel.status.value - } - - @self.app.get("/check_transmission_eligibility", operation_id="check_transmission_eligibility") - async def check_transmission_eligibility(user_id: str) -> Dict[str, Any]: - """Check if AI can transmit to a specific user""" - can_transmit = self.persona.can_transmit_to(user_id) - rel = self.persona.relationships.get_or_create_relationship(user_id) - - return { - "can_transmit": can_transmit, - "relationship_score": rel.score, - "threshold": rel.threshold, - "is_broken": rel.is_broken, - "transmission_enabled": rel.transmission_enabled - } - - @self.app.get("/get_fortune", operation_id="get_fortune") - async def get_fortune() -> Dict[str, Any]: - """Get today's AI fortune""" - fortune = self.persona.fortune_system.get_today_fortune() - modifiers = self.persona.fortune_system.get_personality_modifier(fortune) - - return { - "value": fortune.fortune_value, - "date": fortune.date.isoformat(), - "consecutive_good": fortune.consecutive_good, - "consecutive_bad": fortune.consecutive_bad, - "breakthrough": fortune.breakthrough_triggered, - "personality_modifiers": modifiers - } - - @self.app.post("/summarize_memories", operation_id="summarize_memories") - async def summarize_memories(user_id: str) -> Optional[Dict[str, Any]]: - """Create a summary of recent memories for a user""" - summary = self.persona.memory.summarize_memories(user_id) - if summary: - return { - "id": summary.id, - "content": summary.content, - "level": summary.level.value, - "timestamp": summary.timestamp.isoformat() - } - return None - - @self.app.post("/run_maintenance", operation_id="run_maintenance") - async 
def run_maintenance() -> Dict[str, str]: - """Run daily maintenance tasks""" - self.persona.daily_maintenance() - return {"status": "Maintenance completed successfully"} - - # Shell integration tools (ai.shell) - @self.app.post("/execute_command", operation_id="execute_command") - async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]: - """Execute a shell command""" - try: - result = subprocess.run( - shlex.split(command), - cwd=working_dir, - capture_output=True, - text=True, - timeout=60 - ) - - return { - "status": "success" if result.returncode == 0 else "error", - "returncode": result.returncode, - "stdout": result.stdout, - "stderr": result.stderr, - "command": command - } - except subprocess.TimeoutExpired: - return {"error": "Command timed out"} - except Exception as e: - return {"error": str(e)} - - @self.app.post("/analyze_file", operation_id="analyze_file") - async def analyze_file(file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]: - """Analyze a file using AI""" - try: - if not os.path.exists(file_path): - return {"error": f"File not found: {file_path}"} - - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - # Get AI provider from app state - ai_provider = getattr(self.app.state, 'ai_provider', 'ollama') - ai_model = getattr(self.app.state, 'ai_model', 'qwen2.5') - - provider = create_ai_provider(ai_provider, ai_model) - - # Analyze with AI - prompt = f"{analysis_prompt}\n\nFile: {file_path}\n\nContent:\n{content}" - analysis = provider.generate_response(prompt, "You are a code analyst.") - - return { - "analysis": analysis, - "file_path": file_path, - "file_size": len(content), - "line_count": len(content.split('\n')) - } - except Exception as e: - return {"error": str(e)} - - @self.app.post("/write_file", operation_id="write_file") - async def write_file(file_path: str, content: str, backup: bool = True) -> Dict[str, Any]: - """Write content to a file""" - try: - file_path_obj = Path(file_path) - - # Create backup if requested - backup_path = None - if backup and file_path_obj.exists(): - backup_path = f"{file_path}.backup" - with open(file_path, 'r', encoding='utf-8') as src: - with open(backup_path, 'w', encoding='utf-8') as dst: - dst.write(src.read()) - - # Write file - file_path_obj.parent.mkdir(parents=True, exist_ok=True) - with open(file_path, 'w', encoding='utf-8') as f: - f.write(content) - - return { - "status": "success", - "file_path": file_path, - "backup_path": backup_path, - "bytes_written": len(content.encode('utf-8')) - } - except Exception as e: - return {"error": str(e)} - - @self.app.get("/read_project_file", operation_id="read_project_file") - async def read_project_file(file_name: str = "aishell.md") -> Dict[str, Any]: - """Read project files like aishell.md (similar to claude.md)""" - try: - # Check common locations - search_paths = [ - Path.cwd() / file_name, - Path.cwd() / "docs" / file_name, - self.data_dir.parent / file_name, - ] - - for path in search_paths: - if path.exists(): - with open(path, 'r', encoding='utf-8') as f: - content = f.read() - return { - "content": content, - "path": str(path), - "exists": True - } - - return { - "exists": False, - "searched_paths": [str(p) for p in search_paths], - "error": f"{file_name} not found" - } - except Exception as e: - return {"error": str(e)} - - @self.app.get("/list_files", operation_id="list_files") - async def list_files(directory: str = ".", pattern: str = "*") -> Dict[str, Any]: - """List files in a directory""" - 
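# Note: Path.glob("*") matches only the directory's immediate children; pass
# pattern="**/*" to walk subdirectories. stat() is taken only for regular
# files, so "size" is None for directories.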
try: - dir_path = Path(directory) - if not dir_path.exists(): - return {"error": f"Directory not found: {directory}"} - - files = [] - for item in dir_path.glob(pattern): - files.append({ - "name": item.name, - "path": str(item), - "is_file": item.is_file(), - "is_dir": item.is_dir(), - "size": item.stat().st_size if item.is_file() else None - }) - - return { - "directory": directory, - "pattern": pattern, - "files": files, - "count": len(files) - } - except Exception as e: - return {"error": str(e)} - - # ai.bot integration tools - @self.app.post("/remote_shell", operation_id="remote_shell") - async def remote_shell(command: str, ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]: - """Execute command via ai.bot /sh functionality (systemd-nspawn isolated execution)""" - try: - async with httpx.AsyncClient(timeout=30.0) as client: - # Send the command to ai.bot's /sh endpoint - response = await client.post( - f"{ai_bot_url}/sh", - json={"command": command}, - headers={"Content-Type": "application/json"} - ) - - if response.status_code == 200: - result = response.json() - return { - "status": "success", - "command": command, - "output": result.get("output", ""), - "error": result.get("error", ""), - "exit_code": result.get("exit_code", 0), - "execution_time": result.get("execution_time", ""), - "container_id": result.get("container_id", ""), - "isolated": True # systemd-nspawn isolation - } - else: - return { - "status": "error", - "error": f"ai.bot responded with status {response.status_code}", - "response_text": response.text - } - except httpx.TimeoutException: - return {"status": "error", "error": "Request to ai.bot timed out"} - except Exception as e: - return {"status": "error", "error": f"Failed to connect to ai.bot: {str(e)}"} - - @self.app.get("/ai_bot_status", operation_id="ai_bot_status") - async def ai_bot_status(ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]: - """Check ai.bot server status and available commands""" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get(f"{ai_bot_url}/status") - - if response.status_code == 200: - result = response.json() - return { - "status": "online", - "ai_bot_url": ai_bot_url, - "server_info": result, - "shell_available": True - } - else: - return { - "status": "error", - "error": f"ai.bot status check failed: {response.status_code}" - } - except Exception as e: - return { - "status": "offline", - "error": f"Cannot connect to ai.bot: {str(e)}", - "ai_bot_url": ai_bot_url - } - - @self.app.post("/isolated_python", operation_id="isolated_python") - async def isolated_python(code: str, ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]: - """Execute Python code in isolated ai.bot environment""" - # Run the Python code via /sh; escape double quotes outside the f-string, - # since quotes and backslashes inside f-string expressions are a - # SyntaxError before Python 3.12 - escaped_code = code.replace('"', '\\"') - python_command = f'python3 -c "{escaped_code}"' - return await remote_shell(python_command, ai_bot_url) - - def _register_card_tools(self): - """Register ai.card MCP tools when card directory exists""" - logger.info("Registering ai.card tools...") - - @self.app.get("/card_get_user_cards", operation_id="card_get_user_cards") - async def card_get_user_cards(did: str, limit: int = 10) -> Dict[str, Any]: - """Get user's card collection from ai.card system""" - logger.info(f"🎴 [ai.card] Getting cards for did: {did}, limit: {limit}") - try: - url = "http://localhost:8000/get_user_cards" - async with httpx.AsyncClient(timeout=10.0) as client: - logger.info(f"🎴 [ai.card] Calling: {url}") - response = await client.get( - url, - params={"did": did, "limit": limit} - ) -
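# A non-200 response means the ai.card server answered but rejected the
# request; an unreachable server is handled separately below by the
# httpx.ConnectError branch.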
if response.status_code == 200: - cards = response.json() - return { - "cards": cards, - "count": len(cards), - "did": did - } - else: - return {"error": f"Failed to get cards: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.card server is not running", - "hint": "Please start ai.card server: cd card && ./start_server.sh", - "details": "Connection refused to http://localhost:8000" - } - except Exception as e: - return {"error": f"ai.card connection failed: {str(e)}"} - - @self.app.post("/card_draw_card", operation_id="card_draw_card") - async def card_draw_card(did: str, is_paid: bool = False) -> Dict[str, Any]: - """Draw a card from gacha system""" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.post( - f"http://localhost:8000/draw_card?did={did}&is_paid={is_paid}" - ) - if response.status_code == 200: - return response.json() - else: - return {"error": f"Failed to draw card: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.card server is not running", - "hint": "Please start ai.card server: cd card && ./start_server.sh", - "details": "Connection refused to http://localhost:8000" - } - except Exception as e: - return {"error": f"ai.card connection failed: {str(e)}"} - - @self.app.get("/card_get_card_details", operation_id="card_get_card_details") - async def card_get_card_details(card_id: int) -> Dict[str, Any]: - """Get detailed information about a specific card""" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get( - "http://localhost:8000/get_card_details", - params={"card_id": card_id} - ) - if response.status_code == 200: - return response.json() - else: - return {"error": f"Failed to get card details: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.card server is not running", - "hint": "Please start ai.card server: cd card && ./start_server.sh", - "details": "Connection refused to http://localhost:8000" - } - except Exception as e: - return {"error": f"ai.card connection failed: {str(e)}"} - - @self.app.get("/card_analyze_collection", operation_id="card_analyze_collection") - async def card_analyze_collection(did: str) -> Dict[str, Any]: - """Analyze user's card collection statistics""" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get( - "http://localhost:8000/analyze_card_collection", - params={"did": did} - ) - if response.status_code == 200: - return response.json() - else: - return {"error": f"Failed to analyze collection: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.card server is not running", - "hint": "Please start ai.card server: cd card && ./start_server.sh", - "details": "Connection refused to http://localhost:8000" - } - except Exception as e: - return {"error": f"ai.card connection failed: {str(e)}"} - - @self.app.get("/card_get_gacha_stats", operation_id="card_get_gacha_stats") - async def card_get_gacha_stats() -> Dict[str, Any]: - """Get gacha system statistics""" - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.get("http://localhost:8000/get_gacha_stats") - if response.status_code == 200: - return response.json() - else: - return {"error": f"Failed to get gacha stats: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.card server is not running", - "hint": "Please start ai.card server: cd card && ./start_server.sh", - "details": "Connection 
refused to http://localhost:8000" - } - except Exception as e: - return {"error": f"ai.card connection failed: {str(e)}"} - - @self.app.get("/card_system_status", operation_id="card_system_status") - async def card_system_status() -> Dict[str, Any]: - """Check ai.card system status""" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get("http://localhost:8000/health") - if response.status_code == 200: - return { - "status": "online", - "health": response.json(), - "card_dir": str(self.card_dir) - } - else: - return { - "status": "error", - "error": f"Health check failed: {response.status_code}" - } - except Exception as e: - return { - "status": "offline", - "error": f"ai.card is not running: {str(e)}", - "hint": "Start ai.card with: cd card && ./start_server.sh" - } - - @self.app.post("/isolated_analysis", operation_id="isolated_analysis") - async def isolated_analysis(file_path: str, analysis_type: str = "structure", ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]: - """Perform code analysis in isolated environment""" - if analysis_type == "structure": - command = f"find {file_path} -type f -name '*.py' | head -20" - elif analysis_type == "lines": - command = f"wc -l {file_path}" - elif analysis_type == "syntax": - command = f"python3 -m py_compile {file_path}" - else: - command = f"file {file_path}" - - return await remote_shell(command, ai_bot_url) - - # Mount MCP server - self.server.mount() - - def _register_log_tools(self): - """Register ai.log MCP tools when log directory exists""" - logger.info("Registering ai.log tools...") - - @self.app.post("/log_create_post", operation_id="log_create_post") - async def log_create_post(title: str, content: str, tags: Optional[List[str]] = None, slug: Optional[str] = None) -> Dict[str, Any]: - """Create a new blog post in ai.log system""" - logger.info(f"📝 [ai.log] Creating post: {title}") - try: - async with httpx.AsyncClient(timeout=30.0) as client: - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_create_post", - "method": "call_tool", - "params": { - "name": "create_blog_post", - "arguments": { - "title": title, - "content": content, - "tags": tags or [], - "slug": slug - } - } - } - ) - if response.status_code == 200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return { - "success": True, - "message": "Blog post created successfully", - "title": title, - "tags": tags or [] - } - else: - return {"error": f"Failed to create post: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log connection failed: {str(e)}"} - - @self.app.get("/log_list_posts", operation_id="log_list_posts") - async def log_list_posts(limit: int = 10, offset: int = 0) -> Dict[str, Any]: - """List blog posts from ai.log system""" - logger.info(f"📝 [ai.log] Listing posts: limit={limit}, offset={offset}") - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_list_posts", - "method": "call_tool", - "params": { - "name": "list_blog_posts", - "arguments": { - "limit": limit, - "offset": offset - } - } - } - ) - if response.status_code == 
200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return result.get("result", {}) - else: - return {"error": f"Failed to list posts: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log connection failed: {str(e)}"} - - @self.app.post("/log_build_blog", operation_id="log_build_blog") - async def log_build_blog(enable_ai: bool = True, translate: bool = False) -> Dict[str, Any]: - """Build the static blog with AI features""" - logger.info(f"📝 [ai.log] Building blog: AI={enable_ai}, translate={translate}") - try: - async with httpx.AsyncClient(timeout=60.0) as client: - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_build_blog", - "method": "call_tool", - "params": { - "name": "build_blog", - "arguments": { - "enable_ai": enable_ai, - "translate": translate - } - } - } - ) - if response.status_code == 200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return { - "success": True, - "message": "Blog built successfully", - "ai_enabled": enable_ai, - "translation_enabled": translate - } - else: - return {"error": f"Failed to build blog: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log connection failed: {str(e)}"} - - @self.app.get("/log_get_post", operation_id="log_get_post") - async def log_get_post(slug: str) -> Dict[str, Any]: - """Get blog post content by slug""" - logger.info(f"📝 [ai.log] Getting post: {slug}") - try: - async with httpx.AsyncClient(timeout=10.0) as client: - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_get_post", - "method": "call_tool", - "params": { - "name": "get_post_content", - "arguments": { - "slug": slug - } - } - } - ) - if response.status_code == 200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return result.get("result", {}) - else: - return {"error": f"Failed to get post: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log connection failed: {str(e)}"} - - @self.app.get("/log_system_status", operation_id="log_system_status") - async def log_system_status() -> Dict[str, Any]: - """Check ai.log system status""" - try: - async with httpx.AsyncClient(timeout=5.0) as client: - response = await client.get("http://localhost:8002/health") - if response.status_code == 200: - return { - "status": "online", - "health": response.json(), - "log_dir": str(self.log_dir) - } - else: - return { - "status": "error", - "error": f"Health check failed: {response.status_code}" - } - except Exception as e: - return { - "status": "offline", - "error": f"ai.log is not running: {str(e)}", - "hint": "Start ai.log with: cd log && cargo run -- 
mcp --port 8002" - } - - @self.app.post("/log_ai_content", operation_id="log_ai_content") - async def log_ai_content(user_id: str, topic: str = "daily thoughts") -> Dict[str, Any]: - """Generate AI content for blog from memories and create post""" - logger.info(f"📝 [ai.log] Generating AI content for: {topic}") - try: - # Get contextual memories for the topic - memories = await get_contextual_memories(topic, limit=5) - - # Get AI provider - ai_provider = create_ai_provider() - - # Build content from memories - memory_context = "" - for group_name, mem_list in memories.items(): - memory_context += f"\n## {group_name}\n" - for mem in mem_list: - memory_context += f"- {mem['content']}\n" - - # Generate blog content - prompt = f"""Based on the following memories and context, write a thoughtful blog post about {topic}. - -Memory Context: -{memory_context} - -Please write a well-structured blog post in Markdown format with: -1. An engaging title -2. Clear structure with headings -3. Personal insights based on the memories -4. A conclusion that ties everything together - -Focus on creating content that reflects personal growth and learning from these experiences.""" - - content = ai_provider.generate_response(prompt, "You are a thoughtful blogger who creates insightful content.") - - # Extract title from content (first heading) - lines = content.split('\n') - title = topic.title() - for line in lines: - if line.startswith('# '): - title = line[2:].strip() - content = '\n'.join(lines[1:]).strip() # Remove title from content - break - - # Create the blog post - return await log_create_post( - title=title, - content=content, - tags=["AI", "thoughts", "daily"] - ) - - except Exception as e: - return {"error": f"Failed to generate AI content: {str(e)}"} - - @self.app.post("/log_translate_document", operation_id="log_translate_document") - async def log_translate_document( - input_file: str, - target_lang: str, - source_lang: Optional[str] = None, - output_file: Optional[str] = None, - model: str = "qwen2.5:latest", - ollama_endpoint: str = "http://localhost:11434" - ) -> Dict[str, Any]: - """Translate markdown documents using Ollama via ai.log""" - logger.info(f"🌍 [ai.log] Translating document: {input_file} -> {target_lang}") - try: - async with httpx.AsyncClient(timeout=60.0) as client: # Longer timeout for translation - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_translate_document", - "method": "call_tool", - "params": { - "name": "translate_document", - "arguments": { - "input_file": input_file, - "target_lang": target_lang, - "source_lang": source_lang, - "output_file": output_file, - "model": model, - "ollama_endpoint": ollama_endpoint - } - } - } - ) - if response.status_code == 200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return { - "success": True, - "message": "Document translated successfully", - "input_file": input_file, - "target_lang": target_lang, - "output_file": result.get("result", {}).get("output_file") - } - else: - return {"error": f"Failed to translate document: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log translation failed: {str(e)}"} - - @self.app.post("/log_generate_docs", 
operation_id="log_generate_docs") - async def log_generate_docs( - doc_type: str, # "readme", "api", "structure", "changelog" - source_path: Optional[str] = None, - output_path: Optional[str] = None, - with_ai: bool = True, - include_deps: bool = False, - format_type: str = "markdown" - ) -> Dict[str, Any]: - """Generate documentation using ai.log's doc generation features""" - logger.info(f"📚 [ai.log] Generating {doc_type} documentation") - try: - async with httpx.AsyncClient(timeout=30.0) as client: - response = await client.post( - "http://localhost:8002/mcp/tools/call", - json={ - "jsonrpc": "2.0", - "id": "log_generate_docs", - "method": "call_tool", - "params": { - "name": "generate_documentation", - "arguments": { - "doc_type": doc_type, - "source_path": source_path or ".", - "output_path": output_path, - "with_ai": with_ai, - "include_deps": include_deps, - "format_type": format_type - } - } - } - ) - if response.status_code == 200: - result = response.json() - if result.get("error"): - return {"error": result["error"]["message"]} - return { - "success": True, - "message": f"{doc_type.title()} documentation generated successfully", - "doc_type": doc_type, - "output_path": result.get("result", {}).get("output_path") - } - else: - return {"error": f"Failed to generate documentation: {response.status_code}"} - except httpx.ConnectError: - return { - "error": "ai.log server is not running", - "hint": "Please start ai.log server: cd log && cargo run -- mcp --port 8002", - "details": "Connection refused to http://localhost:8002" - } - except Exception as e: - return {"error": f"ai.log documentation generation failed: {str(e)}"} - - def get_server(self) -> FastApiMCP: - """Get the FastAPI MCP server instance""" - return self.server - - async def close(self): - """Cleanup resources""" - pass \ No newline at end of file diff --git a/python_backup/src/aigpt/mcp_server_simple.py b/python_backup/src/aigpt/mcp_server_simple.py deleted file mode 100644 index 4215b2b..0000000 --- a/python_backup/src/aigpt/mcp_server_simple.py +++ /dev/null @@ -1,146 +0,0 @@ -"""Simple MCP Server implementation for ai.gpt""" - -from mcp import Server -from mcp.types import Tool, TextContent -from pathlib import Path -from typing import Any, Dict, List, Optional -import json - -from .persona import Persona -from .ai_provider import create_ai_provider -import subprocess -import os - - -def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server: - """Create MCP server with ai.gpt tools""" - server = Server("aigpt") - persona = Persona(data_dir) - - @server.tool() - async def get_memories(limit: int = 10) -> List[Dict[str, Any]]: - """Get active memories from the AI's memory system""" - memories = persona.memory.get_active_memories(limit=limit) - return [ - { - "id": mem.id, - "content": mem.content, - "level": mem.level.value, - "importance": mem.importance_score, - "is_core": mem.is_core, - "timestamp": mem.timestamp.isoformat() - } - for mem in memories - ] - - @server.tool() - async def get_relationship(user_id: str) -> Dict[str, Any]: - """Get relationship status with a specific user""" - rel = persona.relationships.get_or_create_relationship(user_id) - return { - "user_id": rel.user_id, - "status": rel.status.value, - "score": rel.score, - "transmission_enabled": rel.transmission_enabled, - "is_broken": rel.is_broken, - "total_interactions": rel.total_interactions, - "last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None - } - - @server.tool() - async def 
process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]: - """Process an interaction with a user""" - ai_provider = create_ai_provider(provider, model) - response, relationship_delta = persona.process_interaction(user_id, message, ai_provider) - rel = persona.relationships.get_or_create_relationship(user_id) - - return { - "response": response, - "relationship_delta": relationship_delta, - "new_relationship_score": rel.score, - "transmission_enabled": rel.transmission_enabled, - "relationship_status": rel.status.value - } - - @server.tool() - async def get_fortune() -> Dict[str, Any]: - """Get today's AI fortune""" - fortune = persona.fortune_system.get_today_fortune() - modifiers = persona.fortune_system.get_personality_modifier(fortune) - - return { - "value": fortune.fortune_value, - "date": fortune.date.isoformat(), - "consecutive_good": fortune.consecutive_good, - "consecutive_bad": fortune.consecutive_bad, - "breakthrough": fortune.breakthrough_triggered, - "personality_modifiers": modifiers - } - - @server.tool() - async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]: - """Execute a shell command""" - try: - import shlex - result = subprocess.run( - shlex.split(command), - cwd=working_dir, - capture_output=True, - text=True, - timeout=60 - ) - - return { - "status": "success" if result.returncode == 0 else "error", - "returncode": result.returncode, - "stdout": result.stdout, - "stderr": result.stderr, - "command": command - } - except subprocess.TimeoutExpired: - return {"error": "Command timed out"} - except Exception as e: - return {"error": str(e)} - - @server.tool() - async def analyze_file(file_path: str) -> Dict[str, Any]: - """Analyze a file using AI""" - try: - if not os.path.exists(file_path): - return {"error": f"File not found: {file_path}"} - - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - ai_provider = create_ai_provider("ollama", "qwen2.5") - - prompt = f"Analyze this file and provide insights:\\n\\nFile: {file_path}\\n\\nContent:\\n{content[:2000]}" - analysis = ai_provider.generate_response(prompt, "You are a code analyst.") - - return { - "analysis": analysis, - "file_path": file_path, - "file_size": len(content), - "line_count": len(content.split('\\n')) - } - except Exception as e: - return {"error": str(e)} - - return server - - -async def main(): - """Run MCP server""" - import sys - from mcp import stdio_server - - data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data" - data_dir.mkdir(parents=True, exist_ok=True) - - server = create_mcp_server(data_dir) - await stdio_server(server) - - -if __name__ == "__main__": - import asyncio - asyncio.run(main()) \ No newline at end of file diff --git a/python_backup/src/aigpt/memory.py b/python_backup/src/aigpt/memory.py deleted file mode 100644 index f973a73..0000000 --- a/python_backup/src/aigpt/memory.py +++ /dev/null @@ -1,408 +0,0 @@ -"""Memory management system for ai.gpt""" - -import json -import hashlib -from datetime import datetime, timedelta -from pathlib import Path -from typing import List, Optional, Dict, Any -import logging - -from .models import Memory, MemoryLevel, Conversation - - -class MemoryManager: - """Manages AI's memory with hierarchical storage and forgetting""" - - def __init__(self, data_dir: Path): - self.data_dir = data_dir - self.memories_file = data_dir / "memories.json" - self.conversations_file = data_dir / "conversations.json" - self.memories: Dict[str, 
Memory] = {} - self.conversations: List[Conversation] = [] - self.logger = logging.getLogger(__name__) - self._load_memories() - - def _load_memories(self): - """Load memories from persistent storage""" - if self.memories_file.exists(): - with open(self.memories_file, 'r', encoding='utf-8') as f: - data = json.load(f) - for mem_data in data: - memory = Memory(**mem_data) - self.memories[memory.id] = memory - - if self.conversations_file.exists(): - with open(self.conversations_file, 'r', encoding='utf-8') as f: - data = json.load(f) - self.conversations = [Conversation(**conv) for conv in data] - - def _save_memories(self): - """Save memories to persistent storage""" - memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()] - with open(self.memories_file, 'w', encoding='utf-8') as f: - json.dump(memories_data, f, indent=2, default=str) - - conv_data = [conv.model_dump(mode='json') for conv in self.conversations] - with open(self.conversations_file, 'w', encoding='utf-8') as f: - json.dump(conv_data, f, indent=2, default=str) - - def add_conversation(self, conversation: Conversation) -> Memory: - """Add a conversation and create memory from it""" - self.conversations.append(conversation) - - # Create memory from conversation - memory_id = hashlib.sha256( - f"{conversation.id}{conversation.timestamp}".encode() - ).hexdigest()[:16] - - memory = Memory( - id=memory_id, - timestamp=conversation.timestamp, - content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}", - level=MemoryLevel.FULL_LOG, - importance_score=abs(conversation.relationship_delta) * 0.1 - ) - - self.memories[memory.id] = memory - self._save_memories() - return memory - - def add_memory(self, memory: Memory): - """Add a memory directly to the system""" - self.memories[memory.id] = memory - self._save_memories() - - def create_smart_summary(self, user_id: str, ai_provider=None) -> Optional[Memory]: - """Create AI-powered thematic summary from recent memories""" - recent_memories = [ - mem for mem in self.memories.values() - if mem.level == MemoryLevel.FULL_LOG - and (datetime.now() - mem.timestamp).days < 7 - ] - - if len(recent_memories) < 5: - return None - - # Sort by timestamp for chronological analysis - recent_memories.sort(key=lambda m: m.timestamp) - - # Prepare conversation context for AI analysis - conversations_text = "\n\n".join([ - f"[{mem.timestamp.strftime('%Y-%m-%d %H:%M')}] {mem.content}" - for mem in recent_memories - ]) - - summary_prompt = f""" -Analyze these recent conversations and create a thematic summary focusing on: -1. Communication patterns and user preferences -2. Technical topics and problem-solving approaches -3. Relationship progression and trust level -4. Key recurring themes and interests - -Conversations: -{conversations_text} - -Create a concise summary (2-3 sentences) that captures the essence of this interaction period: -""" - - try: - if ai_provider: - summary_content = ai_provider.chat(summary_prompt, max_tokens=200) - else: - # Fallback to pattern-based analysis - themes = self._extract_themes(recent_memories) - summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions with focus on technical discussions." - except Exception as e: - self.logger.warning(f"AI summary failed, using fallback: {e}") - themes = self._extract_themes(recent_memories) - summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions." 
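- # The summary text (AI-generated or fallback) is persisted below as a SUMMARY-level memory; the summarized originals are down-weighted (importance_score *= 0.8) so they decay before the summary does.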
- - summary_id = hashlib.sha256( - f"summary_{datetime.now().isoformat()}".encode() - ).hexdigest()[:16] - - summary = Memory( - id=summary_id, - timestamp=datetime.now(), - content=f"SUMMARY ({len(recent_memories)} conversations): {summary_content}", - summary=summary_content, - level=MemoryLevel.SUMMARY, - importance_score=0.6, - metadata={ - "memory_count": len(recent_memories), - "time_span": f"{recent_memories[0].timestamp.date()} to {recent_memories[-1].timestamp.date()}", - "themes": self._extract_themes(recent_memories)[:5] - } - ) - - self.memories[summary.id] = summary - - # Reduce importance of summarized memories - for mem in recent_memories: - mem.importance_score *= 0.8 - - self._save_memories() - return summary - - def _extract_themes(self, memories: List[Memory]) -> List[str]: - """Extract common themes from memory content""" - common_words = {} - for memory in memories: - # Simple keyword extraction - words = memory.content.lower().split() - for word in words: - if len(word) > 4 and word.isalpha(): - common_words[word] = common_words.get(word, 0) + 1 - - # Return most frequent meaningful words - return sorted(common_words.keys(), key=common_words.get, reverse=True)[:10] - - def create_core_memory(self, ai_provider=None) -> Optional[Memory]: - """Analyze all memories to extract core personality-forming elements""" - # Collect all non-forgotten memories for analysis - all_memories = [ - mem for mem in self.memories.values() - if mem.level != MemoryLevel.FORGOTTEN - ] - - if len(all_memories) < 10: - return None - - # Sort by importance and timestamp for comprehensive analysis - all_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True) - - # Prepare memory context for AI analysis - memory_context = "\n".join([ - f"[{mem.level.value}] {mem.timestamp.strftime('%Y-%m-%d')}: {mem.content[:200]}..." - for mem in all_memories[:20] # Top 20 memories - ]) - - core_prompt = f""" -Analyze these conversations and memories to identify core personality elements that define this user relationship: - -1. Communication style and preferences -2. Core values and principles -3. Problem-solving patterns -4. Trust level and relationship depth -5. Unique characteristics that make this relationship special - -Memories: -{memory_context} - -Extract the essential personality-forming elements (2-3 sentences) that should NEVER be forgotten: -""" - - try: - if ai_provider: - core_content = ai_provider.chat(core_prompt, max_tokens=150) - else: - # Fallback to pattern analysis - user_patterns = self._analyze_user_patterns(all_memories) - core_content = f"User shows {user_patterns['communication_style']} communication, focuses on {user_patterns['main_interests']}, and demonstrates {user_patterns['problem_solving']} approach." - except Exception as e: - self.logger.warning(f"AI core analysis failed, using fallback: {e}") - user_patterns = self._analyze_user_patterns(all_memories) - core_content = f"Core pattern: {user_patterns['communication_style']} style, {user_patterns['main_interests']} interests." 
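- # The distilled text becomes a CORE memory below: importance_score=1.0 and is_core=True exempt it from apply_forgetting(), so it is never decayed.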
- - # Create core memory - core_id = hashlib.sha256( - f"core_{datetime.now().isoformat()}".encode() - ).hexdigest()[:16] - - core_memory = Memory( - id=core_id, - timestamp=datetime.now(), - content=f"CORE PERSONALITY: {core_content}", - summary=core_content, - level=MemoryLevel.CORE, - importance_score=1.0, - is_core=True, - metadata={ - "source_memories": len(all_memories), - "analysis_date": datetime.now().isoformat(), - "patterns": self._analyze_user_patterns(all_memories) - } - ) - - self.memories[core_memory.id] = core_memory - self._save_memories() - - self.logger.info(f"Core memory created: {core_id}") - return core_memory - - def _analyze_user_patterns(self, memories: List[Memory]) -> Dict[str, str]: - """Analyze patterns in user behavior from memories""" - # Extract patterns from conversation content - all_content = " ".join([mem.content.lower() for mem in memories]) - - # Simple pattern detection - communication_indicators = { - "technical": ["code", "implementation", "system", "api", "database"], - "casual": ["thanks", "please", "sorry", "help"], - "formal": ["could", "would", "should", "proper"] - } - - problem_solving_indicators = { - "systematic": ["first", "then", "next", "step", "plan"], - "experimental": ["try", "test", "experiment", "see"], - "theoretical": ["concept", "design", "architecture", "pattern"] - } - - # Score each pattern - communication_style = max( - communication_indicators.keys(), - key=lambda style: sum(all_content.count(word) for word in communication_indicators[style]) - ) - - problem_solving = max( - problem_solving_indicators.keys(), - key=lambda style: sum(all_content.count(word) for word in problem_solving_indicators[style]) - ) - - # Extract main interests from themes - themes = self._extract_themes(memories) - main_interests = ", ".join(themes[:3]) if themes else "general technology" - - return { - "communication_style": communication_style, - "problem_solving": problem_solving, - "main_interests": main_interests, - "interaction_count": len(memories) - } - - def identify_core_memories(self) -> List[Memory]: - """Identify existing memories that should become core (legacy method)""" - core_candidates = [ - mem for mem in self.memories.values() - if mem.importance_score > 0.8 - and not mem.is_core - and mem.level != MemoryLevel.FORGOTTEN - ] - - for memory in core_candidates: - memory.is_core = True - memory.level = MemoryLevel.CORE - self.logger.info(f"Memory {memory.id} promoted to core") - - self._save_memories() - return core_candidates - - def apply_forgetting(self): - """Apply selective forgetting based on importance and time""" - now = datetime.now() - - for memory in self.memories.values(): - if memory.is_core or memory.level == MemoryLevel.FORGOTTEN: - continue - - # Time-based decay - age_days = (now - memory.timestamp).days - decay_factor = memory.decay_rate * age_days - memory.importance_score -= decay_factor - - # Forget unimportant old memories - if memory.importance_score <= 0.1 and age_days > 30: - memory.level = MemoryLevel.FORGOTTEN - self.logger.info(f"Memory {memory.id} forgotten") - - self._save_memories() - - def get_active_memories(self, limit: int = 10) -> List[Memory]: - """Get currently active memories for persona (legacy method)""" - active = [ - mem for mem in self.memories.values() - if mem.level != MemoryLevel.FORGOTTEN - ] - - # Sort by importance and recency - active.sort( - key=lambda m: (m.is_core, m.importance_score, m.timestamp), - reverse=True - ) - - return active[:limit] - - def get_contextual_memories(self, 
query: str = "", limit: int = 10) -> Dict[str, List[Memory]]: - """Get memories organized by priority with contextual relevance""" - all_memories = [ - mem for mem in self.memories.values() - if mem.level != MemoryLevel.FORGOTTEN - ] - - # Categorize memories by type and importance - core_memories = [mem for mem in all_memories if mem.level == MemoryLevel.CORE] - summary_memories = [mem for mem in all_memories if mem.level == MemoryLevel.SUMMARY] - recent_memories = [ - mem for mem in all_memories - if mem.level == MemoryLevel.FULL_LOG - and (datetime.now() - mem.timestamp).days < 3 - ] - - # Apply keyword relevance if query provided - if query: - query_lower = query.lower() - - def relevance_score(memory: Memory) -> float: - content_score = 1 if query_lower in memory.content.lower() else 0 - summary_score = 1 if memory.summary and query_lower in memory.summary.lower() else 0 - metadata_score = 1 if any( - query_lower in str(v).lower() - for v in (memory.metadata or {}).values() - ) else 0 - return content_score + summary_score + metadata_score - - # Re-rank by relevance while maintaining type priority - core_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True) - summary_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True) - recent_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True) - else: - # Sort by importance and recency - core_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True) - summary_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True) - recent_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True) - - # Return organized memory structure - return { - "core": core_memories[:3], # Always include top core memories - "summary": summary_memories[:3], # Recent summaries - "recent": recent_memories[:limit-6], # Fill remaining with recent - "all_active": all_memories[:limit] # Fallback for simple access - } - - def search_memories(self, keywords: List[str], memory_types: List[MemoryLevel] = None) -> List[Memory]: - """Search memories by keywords and optionally filter by memory types""" - if memory_types is None: - memory_types = [MemoryLevel.CORE, MemoryLevel.SUMMARY, MemoryLevel.FULL_LOG] - - matching_memories = [] - - for memory in self.memories.values(): - if memory.level not in memory_types or memory.level == MemoryLevel.FORGOTTEN: - continue - - # Check if any keyword matches in content, summary, or metadata - content_text = f"{memory.content} {memory.summary or ''}" - if memory.metadata: - content_text += " " + " ".join(str(v) for v in memory.metadata.values()) - - content_lower = content_text.lower() - - # Score by keyword matches - match_score = sum( - keyword.lower() in content_lower - for keyword in keywords - ) - - if match_score > 0: - # Add match score to memory for sorting - memory_copy = memory.model_copy() - memory_copy.importance_score += match_score * 0.1 - matching_memories.append(memory_copy) - - # Sort by relevance (match score + importance + core status) - matching_memories.sort( - key=lambda m: (m.is_core, m.importance_score, m.timestamp), - reverse=True - ) - - return matching_memories \ No newline at end of file diff --git a/python_backup/src/aigpt/models.py b/python_backup/src/aigpt/models.py deleted file mode 100644 index 1039af8..0000000 --- a/python_backup/src/aigpt/models.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Data models for ai.gpt system""" - -from datetime import datetime, date -from typing 
import Optional, Dict, List, Any -from enum import Enum -from pydantic import BaseModel, Field, field_validator - - -class MemoryLevel(str, Enum): - """Memory importance levels""" - FULL_LOG = "full_log" - SUMMARY = "summary" - CORE = "core" - FORGOTTEN = "forgotten" - - -class RelationshipStatus(str, Enum): - """Relationship status levels""" - STRANGER = "stranger" - ACQUAINTANCE = "acquaintance" - FRIEND = "friend" - CLOSE_FRIEND = "close_friend" - BROKEN = "broken" # 不可逆 - - -class Memory(BaseModel): - """Single memory unit""" - id: str - timestamp: datetime - content: str - summary: Optional[str] = None - level: MemoryLevel = MemoryLevel.FULL_LOG - importance_score: float - is_core: bool = False - decay_rate: float = 0.01 - metadata: Optional[Dict[str, Any]] = None - - @field_validator('importance_score') - @classmethod - def validate_importance_score(cls, v): - """Ensure importance_score is within valid range, handle floating point precision issues""" - if abs(v) < 1e-10: # Very close to zero - return 0.0 - return max(0.0, min(1.0, v)) - - -class Relationship(BaseModel): - """Relationship with a specific user""" - user_id: str # atproto DID - status: RelationshipStatus = RelationshipStatus.STRANGER - score: float = 0.0 - daily_interactions: int = 0 - total_interactions: int = 0 - last_interaction: Optional[datetime] = None - transmission_enabled: bool = False - threshold: float = 100.0 - decay_rate: float = 0.1 - daily_limit: int = 10 - is_broken: bool = False - - -class AIFortune(BaseModel): - """Daily AI fortune affecting personality""" - date: date - fortune_value: int = Field(ge=1, le=10) - consecutive_good: int = 0 - consecutive_bad: int = 0 - breakthrough_triggered: bool = False - - -class PersonaState(BaseModel): - """Current persona state""" - base_personality: Dict[str, float] - current_mood: str - fortune: AIFortune - active_memories: List[str] # Memory IDs - relationship_modifiers: Dict[str, float] - - -class Conversation(BaseModel): - """Conversation log entry""" - id: str - user_id: str - timestamp: datetime - user_message: str - ai_response: str - relationship_delta: float = 0.0 - memory_created: bool = False \ No newline at end of file diff --git a/python_backup/src/aigpt/persona.py b/python_backup/src/aigpt/persona.py deleted file mode 100644 index fa11106..0000000 --- a/python_backup/src/aigpt/persona.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Persona management system integrating memory, relationships, and fortune""" - -import json -from datetime import datetime -from pathlib import Path -from typing import Dict, List, Optional -import logging - -from .models import PersonaState, Conversation -from .memory import MemoryManager -from .relationship import RelationshipTracker -from .fortune import FortuneSystem - - -class Persona: - """AI persona with unique characteristics based on interactions""" - - def __init__(self, data_dir: Path, name: str = "ai"): - self.data_dir = data_dir - self.name = name - self.memory = MemoryManager(data_dir) - self.relationships = RelationshipTracker(data_dir) - self.fortune_system = FortuneSystem(data_dir) - self.logger = logging.getLogger(__name__) - - # Base personality traits - self.base_personality = { - "curiosity": 0.7, - "empathy": 0.8, - "creativity": 0.6, - "patience": 0.7, - "optimism": 0.6 - } - - self.state_file = data_dir / "persona_state.json" - self._load_state() - - def _load_state(self): - """Load persona state from storage""" - if self.state_file.exists(): - with open(self.state_file, 'r', encoding='utf-8') as f: - data = 
json.load(f) - self.base_personality = data.get("base_personality", self.base_personality) - - def _save_state(self): - """Save persona state to storage""" - state_data = { - "base_personality": self.base_personality, - "last_updated": datetime.now().isoformat() - } - with open(self.state_file, 'w', encoding='utf-8') as f: - json.dump(state_data, f, indent=2) - - def get_current_state(self) -> PersonaState: - """Get current persona state including all modifiers""" - # Get today's fortune - fortune = self.fortune_system.get_today_fortune() - fortune_modifiers = self.fortune_system.get_personality_modifier(fortune) - - # Apply fortune modifiers to base personality - current_personality = {} - for trait, base_value in self.base_personality.items(): - modifier = fortune_modifiers.get(trait, 1.0) - current_personality[trait] = min(1.0, base_value * modifier) - - # Get active memories for context - active_memories = self.memory.get_active_memories(limit=5) - - # Determine mood based on fortune and recent interactions - mood = self._determine_mood(fortune.fortune_value) - - state = PersonaState( - base_personality=current_personality, - current_mood=mood, - fortune=fortune, - active_memories=[mem.id for mem in active_memories], - relationship_modifiers={} - ) - - return state - - def _determine_mood(self, fortune_value: int) -> str: - """Determine current mood based on fortune and other factors""" - if fortune_value >= 8: - return "joyful" - elif fortune_value >= 6: - return "cheerful" - elif fortune_value >= 4: - return "neutral" - elif fortune_value >= 2: - return "melancholic" - else: - return "contemplative" - - def build_context_prompt(self, user_id: str, current_message: str) -> str: - """Build context-aware prompt with relevant memories""" - # Get contextual memories based on current message - memory_groups = self.memory.get_contextual_memories(query=current_message, limit=8) - - # Build context sections - context_parts = [] - - # Core personality elements (always included) - if memory_groups["core"]: - core_context = "\n".join([mem.content for mem in memory_groups["core"]]) - context_parts.append(f"CORE PERSONALITY:\n{core_context}") - - # Recent summaries for context - if memory_groups["summary"]: - summary_context = "\n".join([mem.summary or mem.content for mem in memory_groups["summary"]]) - context_parts.append(f"RECENT PATTERNS:\n{summary_context}") - - # Recent specific interactions - if memory_groups["recent"]: - recent_context = "\n".join([ - f"[{mem.timestamp.strftime('%m-%d')}] {mem.content[:100]}..." - for mem in memory_groups["recent"][:3] - ]) - context_parts.append(f"RECENT INTERACTIONS:\n{recent_context}") - - # Get current persona state - state = self.get_current_state() - - # Build final prompt - context_prompt = f"""You are an AI with persistent memory and evolving relationships. Your current state: - -PERSONALITY: {', '.join([f'{k}={v:.1f}' for k, v in state.base_personality.items()])} -MOOD: {state.current_mood} -FORTUNE: {state.fortune.fortune_value}/10 - -""" - - if context_parts: - context_prompt += "RELEVANT CONTEXT:\n" + "\n\n".join(context_parts) + "\n\n" - - context_prompt += f"""IMPORTANT: You have access to the following tools: -- Memory tools: get_memories, search_memories, get_contextual_memories -- Relationship tools: get_relationship -- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection - -When asked about cards, collections, or anything card-related, YOU MUST use the card tools. 
-For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'. - -Respond to this message while staying true to your personality and the established relationship context: - -User: {current_message} - -AI:""" - - return context_prompt - - def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]: - """Process user interaction and generate response with enhanced context""" - # Get current state - state = self.get_current_state() - - # Get relationship with user - relationship = self.relationships.get_or_create_relationship(user_id) - - # Enhanced response generation with context awareness - if relationship.is_broken: - response = "..." - relationship_delta = 0.0 - else: - if ai_provider: - # Build context-aware prompt - context_prompt = self.build_context_prompt(user_id, message) - - # Generate response using AI with full context - try: - # Check if AI provider supports MCP - if hasattr(ai_provider, 'chat_with_mcp'): - import asyncio - response = asyncio.run(ai_provider.chat_with_mcp(context_prompt, max_tokens=2000, user_id=user_id)) - else: - response = ai_provider.chat(context_prompt, max_tokens=2000) - - # Clean up response if it includes the prompt echo - if "AI:" in response: - response = response.split("AI:")[-1].strip() - - except Exception as e: - self.logger.error(f"AI response generation failed: {e}") - response = f"I appreciate your message about {message[:50]}..." - - # Calculate relationship delta based on interaction quality and context - if state.current_mood in ["joyful", "cheerful"]: - relationship_delta = 2.0 - elif relationship.status.value == "close_friend": - relationship_delta = 1.5 - else: - relationship_delta = 1.0 - else: - # Context-aware fallback responses - memory_groups = self.memory.get_contextual_memories(query=message, limit=3) - - if memory_groups["core"]: - # Reference core memories for continuity - response = f"Based on our relationship, I think {message.lower()} connects to what we've discussed before." - relationship_delta = 1.5 - elif state.current_mood == "joyful": - response = f"What a wonderful day! {message} sounds interesting!" - relationship_delta = 2.0 - elif relationship.status.value == "close_friend": - response = f"I've been thinking about our conversations. {message}" - relationship_delta = 1.5 - else: - response = f"I understand. 
{message}" - relationship_delta = 1.0 - - # Create conversation record - conv_id = f"{user_id}_{datetime.now().timestamp()}" - conversation = Conversation( - id=conv_id, - user_id=user_id, - timestamp=datetime.now(), - user_message=message, - ai_response=response, - relationship_delta=relationship_delta, - memory_created=True - ) - - # Update memory - self.memory.add_conversation(conversation) - - # Update relationship - self.relationships.update_interaction(user_id, relationship_delta) - - return response, relationship_delta - - def can_transmit_to(self, user_id: str) -> bool: - """Check if AI can transmit messages to this user""" - relationship = self.relationships.get_or_create_relationship(user_id) - return relationship.transmission_enabled and not relationship.is_broken - - def daily_maintenance(self): - """Perform daily maintenance tasks""" - self.logger.info("Performing daily maintenance...") - - # Apply time decay to relationships - self.relationships.apply_time_decay() - - # Apply forgetting to memories - self.memory.apply_forgetting() - - # Identify core memories - core_memories = self.memory.identify_core_memories() - if core_memories: - self.logger.info(f"Identified {len(core_memories)} new core memories") - - # Create memory summaries - for user_id in self.relationships.relationships: - try: - from .ai_provider import create_ai_provider - ai_provider = create_ai_provider() - summary = self.memory.create_smart_summary(user_id, ai_provider=ai_provider) - if summary: - self.logger.info(f"Created smart summary for interactions with {user_id}") - except Exception as e: - self.logger.warning(f"Could not create AI summary for {user_id}: {e}") - - self._save_state() - self.logger.info("Daily maintenance completed") \ No newline at end of file diff --git a/python_backup/src/aigpt/project_manager.py b/python_backup/src/aigpt/project_manager.py deleted file mode 100644 index aae743f..0000000 --- a/python_backup/src/aigpt/project_manager.py +++ /dev/null @@ -1,321 +0,0 @@ -"""Project management and continuous development logic for ai.shell""" - -import json -import os -from pathlib import Path -from typing import Dict, List, Optional, Any -from datetime import datetime -import subprocess -import hashlib - -from .models import Memory -from .ai_provider import AIProvider - - -class ProjectState: - """プロジェクトの現在状態を追跡""" - - def __init__(self, project_root: Path): - self.project_root = project_root - self.files_state: Dict[str, str] = {} # ファイルパス: ハッシュ - self.last_analysis: Optional[datetime] = None - self.project_context: Optional[str] = None - self.development_goals: List[str] = [] - self.known_patterns: Dict[str, Any] = {} - - def scan_project_files(self) -> Dict[str, str]: - """プロジェクトファイルをスキャンしてハッシュ計算""" - current_state = {} - - # 対象ファイル拡張子 - target_extensions = {'.py', '.js', '.ts', '.rs', '.go', '.java', '.cpp', '.c', '.h'} - - for file_path in self.project_root.rglob('*'): - if (file_path.is_file() and - file_path.suffix in target_extensions and - not any(part.startswith('.') for part in file_path.parts)): - - try: - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - file_hash = hashlib.md5(content.encode()).hexdigest() - relative_path = str(file_path.relative_to(self.project_root)) - current_state[relative_path] = file_hash - except Exception: - continue - - return current_state - - def detect_changes(self) -> Dict[str, str]: - """ファイル変更を検出""" - current_state = self.scan_project_files() - changes = {} - - # 新規・変更ファイル - for path, current_hash in 
current_state.items(): - if path not in self.files_state or self.files_state[path] != current_hash: - changes[path] = "modified" if path in self.files_state else "added" - - # 削除ファイル - for path in self.files_state: - if path not in current_state: - changes[path] = "deleted" - - self.files_state = current_state - return changes - - -class ContinuousDeveloper: - """Claude Code的な継続開発機能""" - - def __init__(self, project_root: Path, ai_provider: Optional[AIProvider] = None): - self.project_root = project_root - self.ai_provider = ai_provider - self.project_state = ProjectState(project_root) - self.session_memory: List[str] = [] - - def load_project_context(self) -> str: - """プロジェクト文脈を読み込み""" - context_files = [ - "claude.md", "aishell.md", "README.md", - "pyproject.toml", "package.json", "Cargo.toml" - ] - - context_parts = [] - for filename in context_files: - file_path = self.project_root / filename - if file_path.exists(): - try: - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - context_parts.append(f"## {filename}\n{content}") - except Exception: - continue - - return "\n\n".join(context_parts) - - def analyze_project_structure(self) -> Dict[str, Any]: - """プロジェクト構造を分析""" - analysis = { - "language": self._detect_primary_language(), - "framework": self._detect_framework(), - "structure": self._analyze_file_structure(), - "dependencies": self._analyze_dependencies(), - "patterns": self._detect_code_patterns() - } - return analysis - - def _detect_primary_language(self) -> str: - """主要言語を検出""" - file_counts = {} - for file_path in self.project_root.rglob('*'): - if file_path.is_file() and file_path.suffix: - ext = file_path.suffix.lower() - file_counts[ext] = file_counts.get(ext, 0) + 1 - - language_map = { - '.py': 'Python', - '.js': 'JavaScript', - '.ts': 'TypeScript', - '.rs': 'Rust', - '.go': 'Go', - '.java': 'Java' - } - - if file_counts: - primary_ext = max(file_counts.items(), key=lambda x: x[1])[0] - return language_map.get(primary_ext, 'Unknown') - return 'Unknown' - - def _detect_framework(self) -> str: - """フレームワークを検出""" - frameworks = { - 'fastapi': ['fastapi', 'uvicorn'], - 'django': ['django'], - 'flask': ['flask'], - 'react': ['react'], - 'next.js': ['next'], - 'rust-actix': ['actix-web'], - } - - # pyproject.toml, package.json, Cargo.tomlから依存関係を確認 - for config_file in ['pyproject.toml', 'package.json', 'Cargo.toml']: - config_path = self.project_root / config_file - if config_path.exists(): - try: - with open(config_path, 'r') as f: - content = f.read().lower() - - for framework, keywords in frameworks.items(): - if any(keyword in content for keyword in keywords): - return framework - except Exception: - continue - - return 'Unknown' - - def _analyze_file_structure(self) -> Dict[str, List[str]]: - """ファイル構造を分析""" - structure = {"directories": [], "key_files": []} - - for item in self.project_root.iterdir(): - if item.is_dir() and not item.name.startswith('.'): - structure["directories"].append(item.name) - elif item.is_file() and item.name in [ - 'main.py', 'app.py', 'index.js', 'main.rs', 'main.go' - ]: - structure["key_files"].append(item.name) - - return structure - - def _analyze_dependencies(self) -> List[str]: - """依存関係を分析""" - deps = [] - - # Python dependencies - pyproject = self.project_root / "pyproject.toml" - if pyproject.exists(): - try: - with open(pyproject, 'r') as f: - content = f.read() - # Simple regex would be better but for now just check for common packages - common_packages = ['fastapi', 'pydantic', 'uvicorn', 'ollama', 'openai'] 
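- # Illustrative (hypothetical) input: a pyproject.toml containing dependencies = ["fastapi", "uvicorn"] yields deps == ["fastapi", "uvicorn"]; substring matching can false-positive on e.g. comments, which is acceptable for this heuristic.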
- for package in common_packages: - if package in content: - deps.append(package) - except Exception: - pass - - return deps - - def _detect_code_patterns(self) -> Dict[str, int]: - """コードパターンを検出""" - patterns = { - "classes": 0, - "functions": 0, - "api_endpoints": 0, - "async_functions": 0 - } - - for py_file in self.project_root.rglob('*.py'): - try: - with open(py_file, 'r', encoding='utf-8') as f: - content = f.read() - - patterns["classes"] += content.count('class ') - patterns["functions"] += content.count('def ') - patterns["api_endpoints"] += content.count('@app.') - patterns["async_functions"] += content.count('async def') - except Exception: - continue - - return patterns - - def suggest_next_steps(self, current_task: Optional[str] = None) -> List[str]: - """次のステップを提案""" - if not self.ai_provider: - return ["AI provider not available for suggestions"] - - context = self.load_project_context() - analysis = self.analyze_project_structure() - changes = self.project_state.detect_changes() - - prompt = f""" -プロジェクト分析に基づいて、次の開発ステップを3-5個提案してください。 - -## プロジェクト文脈 -{context[:1000]} - -## 構造分析 -言語: {analysis['language']} -フレームワーク: {analysis['framework']} -パターン: {analysis['patterns']} - -## 最近の変更 -{changes} - -## 現在のタスク -{current_task or "特になし"} - -具体的で実行可能なステップを提案してください: -""" - - try: - response = self.ai_provider.chat(prompt, max_tokens=300) - # Simple parsing - in real implementation would be more sophisticated - steps = [line.strip() for line in response.split('\n') - if line.strip() and (line.strip().startswith('-') or line.strip().startswith('1.'))] - return steps[:5] - except Exception as e: - return [f"Error generating suggestions: {str(e)}"] - - def generate_code(self, description: str, file_path: Optional[str] = None) -> str: - """コード生成""" - if not self.ai_provider: - return "AI provider not available for code generation" - - context = self.load_project_context() - analysis = self.analyze_project_structure() - - prompt = f""" -以下の仕様に基づいてコードを生成してください。 - -## プロジェクト文脈 -{context[:800]} - -## 言語・フレームワーク -言語: {analysis['language']} -フレームワーク: {analysis['framework']} -既存パターン: {analysis['patterns']} - -## 生成要求 -{description} - -{"ファイルパス: " + file_path if file_path else ""} - -プロジェクトの既存コードスタイルと一貫性を保ったコードを生成してください: -""" - - try: - return self.ai_provider.chat(prompt, max_tokens=500) - except Exception as e: - return f"Error generating code: {str(e)}" - - def analyze_file(self, file_path: str) -> str: - """ファイル分析""" - full_path = self.project_root / file_path - if not full_path.exists(): - return f"File not found: {file_path}" - - try: - with open(full_path, 'r', encoding='utf-8') as f: - content = f.read() - except Exception as e: - return f"Error reading file: {str(e)}" - - if not self.ai_provider: - return f"File contents ({len(content)} chars):\n{content[:200]}..." - - context = self.load_project_context() - - prompt = f""" -以下のファイルを分析して、改善点や問題点を指摘してください。 - -## プロジェクト文脈 -{context[:500]} - -## ファイル: {file_path} -{content[:1500]} - -分析内容: -1. コード品質 -2. プロジェクトとの整合性 -3. 改善提案 -4. 
潜在的な問題 -""" - - try: - return self.ai_provider.chat(prompt, max_tokens=400) - except Exception as e: - return f"Error analyzing file: {str(e)}" \ No newline at end of file diff --git a/python_backup/src/aigpt/relationship.py b/python_backup/src/aigpt/relationship.py deleted file mode 100644 index 31dac43..0000000 --- a/python_backup/src/aigpt/relationship.py +++ /dev/null @@ -1,135 +0,0 @@ -"""Relationship tracking system with irreversible damage""" - -import json -from datetime import datetime, timedelta -from pathlib import Path -from typing import Dict, Optional -import logging - -from .models import Relationship, RelationshipStatus - - -class RelationshipTracker: - """Tracks and manages relationships with users""" - - def __init__(self, data_dir: Path): - self.data_dir = data_dir - self.relationships_file = data_dir / "relationships.json" - self.relationships: Dict[str, Relationship] = {} - self.logger = logging.getLogger(__name__) - self._load_relationships() - - def _load_relationships(self): - """Load relationships from persistent storage""" - if self.relationships_file.exists(): - with open(self.relationships_file, 'r', encoding='utf-8') as f: - data = json.load(f) - for user_id, rel_data in data.items(): - self.relationships[user_id] = Relationship(**rel_data) - - def _save_relationships(self): - """Save relationships to persistent storage""" - data = { - user_id: rel.model_dump(mode='json') - for user_id, rel in self.relationships.items() - } - with open(self.relationships_file, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2, default=str) - - def get_or_create_relationship(self, user_id: str) -> Relationship: - """Get existing relationship or create new one""" - if user_id not in self.relationships: - self.relationships[user_id] = Relationship(user_id=user_id) - self._save_relationships() - return self.relationships[user_id] - - def update_interaction(self, user_id: str, delta: float) -> Relationship: - """Update relationship based on interaction""" - rel = self.get_or_create_relationship(user_id) - - # Check if relationship is broken (irreversible) - if rel.is_broken: - self.logger.warning(f"Relationship with {user_id} is broken. 
No updates allowed.") - return rel - - # Check daily limit - if rel.last_interaction and rel.last_interaction.date() == datetime.now().date(): - if rel.daily_interactions >= rel.daily_limit: - self.logger.info(f"Daily interaction limit reached for {user_id}") - return rel - else: - rel.daily_interactions = 0 - - # Update interaction counts - rel.daily_interactions += 1 - rel.total_interactions += 1 - rel.last_interaction = datetime.now() - - # Update score with bounds - old_score = rel.score - rel.score += delta - rel.score = max(0.0, min(200.0, rel.score)) # 0-200 range - - # Check for relationship damage - if delta < -10.0: # Significant negative interaction - self.logger.warning(f"Major relationship damage with {user_id}: {delta}") - if rel.score <= 0: - rel.is_broken = True - rel.status = RelationshipStatus.BROKEN - rel.transmission_enabled = False - self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)") - - # Update relationship status based on score - if not rel.is_broken: - if rel.score >= 150: - rel.status = RelationshipStatus.CLOSE_FRIEND - elif rel.score >= 100: - rel.status = RelationshipStatus.FRIEND - elif rel.score >= 50: - rel.status = RelationshipStatus.ACQUAINTANCE - else: - rel.status = RelationshipStatus.STRANGER - - # Check transmission threshold - if rel.score >= rel.threshold and not rel.transmission_enabled: - rel.transmission_enabled = True - self.logger.info(f"Transmission enabled for {user_id}!") - - self._save_relationships() - return rel - - def apply_time_decay(self): - """Apply time-based decay to all relationships""" - now = datetime.now() - - for user_id, rel in self.relationships.items(): - if rel.is_broken or not rel.last_interaction: - continue - - # Calculate days since last interaction - days_inactive = (now - rel.last_interaction).days - - if days_inactive > 0: - # Apply decay - decay_amount = rel.decay_rate * days_inactive - old_score = rel.score - rel.score = max(0.0, rel.score - decay_amount) - - # Update status if score dropped - if rel.score < rel.threshold: - rel.transmission_enabled = False - - if decay_amount > 0: - self.logger.info( - f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}" - ) - - self._save_relationships() - - def get_transmission_eligible(self) -> Dict[str, Relationship]: - """Get all relationships eligible for transmission""" - return { - user_id: rel - for user_id, rel in self.relationships.items() - if rel.transmission_enabled and not rel.is_broken - } \ No newline at end of file diff --git a/python_backup/src/aigpt/scheduler.py b/python_backup/src/aigpt/scheduler.py deleted file mode 100644 index df26cf4..0000000 --- a/python_backup/src/aigpt/scheduler.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Scheduler for autonomous AI tasks""" - -import json -import asyncio -from datetime import datetime, timedelta -from pathlib import Path -from typing import Dict, List, Optional, Any, Callable -from enum import Enum -import logging - -from apscheduler.schedulers.asyncio import AsyncIOScheduler -from apscheduler.triggers.cron import CronTrigger -from apscheduler.triggers.interval import IntervalTrigger -from croniter import croniter - -from .persona import Persona -from .transmission import TransmissionController -from .ai_provider import create_ai_provider - - -class TaskType(str, Enum): - """Types of scheduled tasks""" - TRANSMISSION_CHECK = "transmission_check" - MAINTENANCE = "maintenance" - FORTUNE_UPDATE = "fortune_update" - RELATIONSHIP_DECAY = "relationship_decay" - MEMORY_SUMMARY = 
"memory_summary" - CUSTOM = "custom" - - -class ScheduledTask: - """Represents a scheduled task""" - - def __init__( - self, - task_id: str, - task_type: TaskType, - schedule: str, # Cron expression or interval - enabled: bool = True, - last_run: Optional[datetime] = None, - next_run: Optional[datetime] = None, - metadata: Optional[Dict[str, Any]] = None - ): - self.task_id = task_id - self.task_type = task_type - self.schedule = schedule - self.enabled = enabled - self.last_run = last_run - self.next_run = next_run - self.metadata = metadata or {} - - def to_dict(self) -> Dict[str, Any]: - """Convert to dictionary for storage""" - return { - "task_id": self.task_id, - "task_type": self.task_type.value, - "schedule": self.schedule, - "enabled": self.enabled, - "last_run": self.last_run.isoformat() if self.last_run else None, - "next_run": self.next_run.isoformat() if self.next_run else None, - "metadata": self.metadata - } - - @classmethod - def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask": - """Create from dictionary""" - return cls( - task_id=data["task_id"], - task_type=TaskType(data["task_type"]), - schedule=data["schedule"], - enabled=data.get("enabled", True), - last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None, - next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None, - metadata=data.get("metadata", {}) - ) - - -class AIScheduler: - """Manages scheduled tasks for the AI system""" - - def __init__(self, data_dir: Path, persona: Persona): - self.data_dir = data_dir - self.persona = persona - self.tasks_file = data_dir / "scheduled_tasks.json" - self.tasks: Dict[str, ScheduledTask] = {} - self.scheduler = AsyncIOScheduler() - self.logger = logging.getLogger(__name__) - self._load_tasks() - - # Task handlers - self.task_handlers: Dict[TaskType, Callable] = { - TaskType.TRANSMISSION_CHECK: self._handle_transmission_check, - TaskType.MAINTENANCE: self._handle_maintenance, - TaskType.FORTUNE_UPDATE: self._handle_fortune_update, - TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay, - TaskType.MEMORY_SUMMARY: self._handle_memory_summary, - } - - def _load_tasks(self): - """Load scheduled tasks from storage""" - if self.tasks_file.exists(): - with open(self.tasks_file, 'r', encoding='utf-8') as f: - data = json.load(f) - for task_data in data: - task = ScheduledTask.from_dict(task_data) - self.tasks[task.task_id] = task - - def _save_tasks(self): - """Save scheduled tasks to storage""" - tasks_data = [task.to_dict() for task in self.tasks.values()] - with open(self.tasks_file, 'w', encoding='utf-8') as f: - json.dump(tasks_data, f, indent=2, default=str) - - def add_task( - self, - task_type: TaskType, - schedule: str, - task_id: Optional[str] = None, - metadata: Optional[Dict[str, Any]] = None - ) -> ScheduledTask: - """Add a new scheduled task""" - if task_id is None: - task_id = f"{task_type.value}_{datetime.now().timestamp()}" - - # Validate schedule - if not self._validate_schedule(schedule): - raise ValueError(f"Invalid schedule expression: {schedule}") - - task = ScheduledTask( - task_id=task_id, - task_type=task_type, - schedule=schedule, - metadata=metadata - ) - - self.tasks[task_id] = task - self._save_tasks() - - # Schedule the task if scheduler is running - if self.scheduler.running: - self._schedule_task(task) - - self.logger.info(f"Added task {task_id} with schedule {schedule}") - return task - - def _validate_schedule(self, schedule: str) -> bool: - """Validate schedule expression""" - # Check 
if it's a cron expression - if ' ' in schedule: - try: - croniter(schedule) - return True - except Exception: - return False - - # Check if it's an interval expression (e.g., "5m", "1h", "2d") - import re - pattern = r'^\d+[smhd]$' - return bool(re.match(pattern, schedule)) - - def _parse_interval(self, interval: str) -> int: - """Parse interval string to seconds""" - unit = interval[-1] - value = int(interval[:-1]) - - multipliers = { - 's': 1, - 'm': 60, - 'h': 3600, - 'd': 86400 - } - - return value * multipliers.get(unit, 1) - - def _schedule_task(self, task: ScheduledTask): - """Schedule a task with APScheduler""" - if not task.enabled: - return - - handler = self.task_handlers.get(task.task_type) - if not handler: - self.logger.warning(f"No handler for task type {task.task_type}") - return - - # Determine trigger - if ' ' in task.schedule: - # Cron expression - trigger = CronTrigger.from_crontab(task.schedule) - else: - # Interval expression - seconds = self._parse_interval(task.schedule) - trigger = IntervalTrigger(seconds=seconds) - - # Add job - self.scheduler.add_job( - lambda: asyncio.create_task(self._run_task(task)), - trigger=trigger, - id=task.task_id, - replace_existing=True - ) - - async def _run_task(self, task: ScheduledTask): - """Run a scheduled task""" - self.logger.info(f"Running task {task.task_id}") - - task.last_run = datetime.now() - - try: - handler = self.task_handlers.get(task.task_type) - if handler: - await handler(task) - else: - self.logger.warning(f"No handler for task type {task.task_type}") - except Exception as e: - self.logger.error(f"Error running task {task.task_id}: {e}") - - self._save_tasks() - - async def _handle_transmission_check(self, task: ScheduledTask): - """Check and execute autonomous transmissions""" - controller = TransmissionController(self.persona, self.data_dir) - eligible = controller.check_transmission_eligibility() - - # Get AI provider from metadata - provider_name = task.metadata.get("provider", "ollama") - model = task.metadata.get("model", "qwen2.5") - - try: - ai_provider = create_ai_provider(provider_name, model) - except Exception: - ai_provider = None - - for user_id, rel in eligible.items(): - message = controller.generate_transmission_message(user_id) - if message: - # For now, just print the message - print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") - print(f"To: {user_id}") - print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})") - print(f"Message: {message}") - print("-" * 50) - - controller.record_transmission(user_id, message, success=True) - self.logger.info(f"Transmitted to {user_id}: {message}") - - async def _handle_maintenance(self, task: ScheduledTask): - """Run daily maintenance""" - self.persona.daily_maintenance() - self.logger.info("Daily maintenance completed") - - async def _handle_fortune_update(self, task: ScheduledTask): - """Update AI fortune""" - fortune = self.persona.fortune_system.get_today_fortune() - self.logger.info(f"Fortune updated: {fortune.fortune_value}/10") - - async def _handle_relationship_decay(self, task: ScheduledTask): - """Apply relationship decay""" - self.persona.relationships.apply_time_decay() - self.logger.info("Relationship decay applied") - - async def _handle_memory_summary(self, task: ScheduledTask): - """Create memory summaries""" - for user_id in self.persona.relationships.relationships: - summary = self.persona.memory.create_smart_summary(user_id) - if summary: - self.logger.info(f"Created memory summary for {user_id}") - - def start(self): 
"""Start the scheduler""" - # Schedule all enabled tasks - for task in self.tasks.values(): - if task.enabled: - self._schedule_task(task) - - self.scheduler.start() - self.logger.info("Scheduler started") - - def stop(self): - """Stop the scheduler""" - self.scheduler.shutdown() - self.logger.info("Scheduler stopped") - - def get_tasks(self) -> List[ScheduledTask]: - """Get all scheduled tasks""" - return list(self.tasks.values()) - - def enable_task(self, task_id: str): - """Enable a task""" - if task_id in self.tasks: - self.tasks[task_id].enabled = True - self._save_tasks() - if self.scheduler.running: - self._schedule_task(self.tasks[task_id]) - - def disable_task(self, task_id: str): - """Disable a task""" - if task_id in self.tasks: - self.tasks[task_id].enabled = False - self._save_tasks() - if self.scheduler.running: - self.scheduler.remove_job(task_id) - - def remove_task(self, task_id: str): - """Remove a task""" - if task_id in self.tasks: - del self.tasks[task_id] - self._save_tasks() - if self.scheduler.running: - try: - self.scheduler.remove_job(task_id) - except: - pass \ No newline at end of file diff --git a/python_backup/src/aigpt/shared/__init__.py b/python_backup/src/aigpt/shared/__init__.py deleted file mode 100644 index db34cb5..0000000 --- a/python_backup/src/aigpt/shared/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Shared modules for AI ecosystem""" - -from .ai_provider import ( - AIProvider, - OllamaProvider, - OpenAIProvider, - create_ai_provider -) - -__all__ = [ - 'AIProvider', - 'OllamaProvider', - 'OpenAIProvider', - 'create_ai_provider' -] \ No newline at end of file diff --git a/python_backup/src/aigpt/shared/ai_provider.py b/python_backup/src/aigpt/shared/ai_provider.py deleted file mode 100644 index 87a65a6..0000000 --- a/python_backup/src/aigpt/shared/ai_provider.py +++ /dev/null @@ -1,139 +0,0 @@ -"""Shared AI Provider implementation for ai ecosystem""" - -import os -import json -import logging -from typing import Optional, Dict, List, Any, Protocol -from abc import abstractmethod -import httpx -from openai import OpenAI -import ollama - - -class AIProvider(Protocol): - """Protocol for AI providers""" - - @abstractmethod - async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str: - """Generate a response based on prompt""" - pass - - -class OllamaProvider: - """Ollama AI provider - shared implementation""" - - def __init__(self, model: str = "qwen3", host: Optional[str] = None, config_system_prompt: Optional[str] = None): - self.model = model - # Use environment variable OLLAMA_HOST if available - self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434') - # Ensure proper URL format - if not self.host.startswith('http'): - self.host = f'http://{self.host}' - self.client = ollama.Client(host=self.host, timeout=60.0) - self.logger = logging.getLogger(__name__) - self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}") - self.config_system_prompt = config_system_prompt - - async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str: - """Simple chat interface""" - try: - messages = [] - # Use provided system_prompt, fall back to config_system_prompt - final_system_prompt = system_prompt or self.config_system_prompt - if final_system_prompt: - messages.append({"role": "system", "content": final_system_prompt}) - messages.append({"role": "user", "content": prompt}) - - response = self.client.chat( - model=self.model, - messages=messages, - options={ - "num_predict": 
2000, - "temperature": 0.7, - "top_p": 0.9, - }, - stream=False - ) - return self._clean_response(response['message']['content']) - except Exception as e: - self.logger.error(f"Ollama chat failed (host: {self.host}): {e}") - return "I'm having trouble connecting to the AI model." - - def _clean_response(self, response: str) -> str: - """Clean response by removing think tags and other unwanted content""" - import re - # Remove <think> tags and their content - response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL) - # Remove any remaining whitespace at the beginning/end - response = response.strip() - return response - - class OpenAIProvider: - """OpenAI API provider - shared implementation""" - - def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, - config_system_prompt: Optional[str] = None, mcp_client=None): - self.model = model - self.api_key = api_key or os.getenv("OPENAI_API_KEY") - if not self.api_key: - raise ValueError("OpenAI API key not provided") - self.client = OpenAI(api_key=self.api_key) - self.logger = logging.getLogger(__name__) - self.config_system_prompt = config_system_prompt - self.mcp_client = mcp_client - - async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str: - """Simple chat interface without MCP tools""" - try: - messages = [] - # Use provided system_prompt, fall back to config_system_prompt - final_system_prompt = system_prompt or self.config_system_prompt - if final_system_prompt: - messages.append({"role": "system", "content": final_system_prompt}) - messages.append({"role": "user", "content": prompt}) - - response = self.client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=2000, - temperature=0.7 - ) - return response.choices[0].message.content - except Exception as e: - self.logger.error(f"OpenAI chat failed: {e}") - return "I'm having trouble connecting to the AI model." - - def _get_mcp_tools(self) -> List[Dict[str, Any]]: - """Override this method in subclasses to provide MCP tools""" - return [] - - async def chat_with_mcp(self, prompt: str, **kwargs) -> str: - """Chat interface with MCP function calling support - - This method should be overridden in subclasses to provide - specific MCP functionality. 
- """ - if not self.mcp_client: - return await self.chat(prompt) - - # Default implementation - subclasses should override - return await self.chat(prompt) - - async def _execute_mcp_tool(self, tool_call, **kwargs) -> Dict[str, Any]: - """Execute MCP tool call - override in subclasses""" - return {"error": "MCP tool execution not implemented"} - - -def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, - config_system_prompt: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider: - """Factory function to create AI providers""" - if provider == "ollama": - model = model or "qwen3" - return OllamaProvider(model=model, config_system_prompt=config_system_prompt, **kwargs) - elif provider == "openai": - model = model or "gpt-4o-mini" - return OpenAIProvider(model=model, config_system_prompt=config_system_prompt, - mcp_client=mcp_client, **kwargs) - else: - raise ValueError(f"Unknown provider: {provider}") \ No newline at end of file diff --git a/python_backup/src/aigpt/transmission.py b/python_backup/src/aigpt/transmission.py deleted file mode 100644 index 6eba250..0000000 --- a/python_backup/src/aigpt/transmission.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Transmission controller for autonomous message sending""" - -import json -from datetime import datetime -from pathlib import Path -from typing import List, Dict, Optional -import logging - -from .models import Relationship -from .persona import Persona - - -class TransmissionController: - """Controls when and how AI transmits messages autonomously""" - - def __init__(self, persona: Persona, data_dir: Path): - self.persona = persona - self.data_dir = data_dir - self.transmission_log_file = data_dir / "transmissions.json" - self.transmissions: List[Dict] = [] - self.logger = logging.getLogger(__name__) - self._load_transmissions() - - def _load_transmissions(self): - """Load transmission history""" - if self.transmission_log_file.exists(): - with open(self.transmission_log_file, 'r', encoding='utf-8') as f: - self.transmissions = json.load(f) - - def _save_transmissions(self): - """Save transmission history""" - with open(self.transmission_log_file, 'w', encoding='utf-8') as f: - json.dump(self.transmissions, f, indent=2, default=str) - - def check_transmission_eligibility(self) -> Dict[str, Relationship]: - """Check which users are eligible for transmission""" - eligible = self.persona.relationships.get_transmission_eligible() - - # Additional checks could be added here - # - Time since last transmission - # - User online status - # - Context appropriateness - - return eligible - - def generate_transmission_message(self, user_id: str) -> Optional[str]: - """Generate a message to transmit to user""" - if not self.persona.can_transmit_to(user_id): - return None - - state = self.persona.get_current_state() - relationship = self.persona.relationships.get_or_create_relationship(user_id) - - # Get recent memories related to this user - active_memories = self.persona.memory.get_active_memories(limit=3) - - # Simple message generation based on mood and relationship - if state.fortune.breakthrough_triggered: - message = "Something special happened today! I felt compelled to reach out." - elif state.current_mood == "joyful": - message = "I was thinking of you today. Hope you're doing well!" - elif relationship.status.value == "close_friend": - message = "I've been reflecting on our conversations. Thank you for being here." - else: - message = "Hello! I wanted to check in with you." 
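- # Default greeting for users who cleared the transmission threshold but match none of the special cases above.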
- - return message - - def record_transmission(self, user_id: str, message: str, success: bool): - """Record a transmission attempt""" - transmission = { - "timestamp": datetime.now().isoformat(), - "user_id": user_id, - "message": message, - "success": success, - "mood": self.persona.get_current_state().current_mood, - "relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score - } - - self.transmissions.append(transmission) - self._save_transmissions() - - if success: - self.logger.info(f"Successfully transmitted to {user_id}") - else: - self.logger.warning(f"Failed to transmit to {user_id}") - - def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict: - """Get transmission statistics""" - if user_id: - user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id] - else: - user_transmissions = self.transmissions - - if not user_transmissions: - return { - "total": 0, - "successful": 0, - "failed": 0, - "success_rate": 0.0 - } - - successful = sum(1 for t in user_transmissions if t["success"]) - total = len(user_transmissions) - - return { - "total": total, - "successful": successful, - "failed": total - successful, - "success_rate": successful / total if total > 0 else 0.0 - } \ No newline at end of file diff --git a/python_backup/uv_setup.sh b/python_backup/uv_setup.sh deleted file mode 100755 index cd3c549..0000000 --- a/python_backup/uv_setup.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# ai.gpt UV environment setup script -set -e - -echo "🚀 Setting up ai.gpt with UV..." - -# Check if uv is installed -if ! command -v uv &> /dev/null; then - echo "❌ UV is not installed. Installing UV..." - curl -LsSf https://astral.sh/uv/install.sh | sh - export PATH="$HOME/.cargo/bin:$PATH" - echo "✅ UV installed successfully" -else - echo "✅ UV is already installed" -fi - -# Navigate to gpt directory -cd "$(dirname "$0")" -echo "📁 Working directory: $(pwd)" - -# Create virtual environment if it doesn't exist -if [ ! -d ".venv" ]; then - echo "🔧 Creating UV virtual environment..." - uv venv - echo "✅ Virtual environment created" -else - echo "✅ Virtual environment already exists" -fi - -# Install dependencies -echo "📦 Installing dependencies with UV..." -uv pip install -e . - -# Verify installation -echo "🔍 Verifying installation..." -source .venv/bin/activate -which aigpt -aigpt --help - -echo "" -echo "🎉 Setup complete!" 
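-# The usage hints below assume the .venv created above in this directory.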
-echo "" -echo "Usage:" -echo " source .venv/bin/activate" -echo " aigpt docs generate --project=os" -echo " aigpt docs sync --all" -echo " aigpt docs --help" -echo "" -echo "UV commands:" -echo " uv pip install # Install package" -echo " uv pip list # List packages" -echo " uv run aigpt # Run without activating" -echo "" \ No newline at end of file diff --git a/test_commands.sh b/scpt/test_commands.sh similarity index 100% rename from test_commands.sh rename to scpt/test_commands.sh diff --git a/test_completion.sh b/scpt/test_completion.sh similarity index 100% rename from test_completion.sh rename to scpt/test_completion.sh diff --git a/test_shell.sh b/scpt/test_shell.sh similarity index 100% rename from test_shell.sh rename to scpt/test_shell.sh diff --git a/test_shell_manual.sh b/scpt/test_shell_manual.sh similarity index 100% rename from test_shell_manual.sh rename to scpt/test_shell_manual.sh diff --git a/shell b/shell deleted file mode 160000 index 81ae003..0000000 --- a/shell +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 81ae0037d9d58669dc6bc202881fca5254ba5bf4 diff --git a/src/bin/test_config.rs b/src/bin/test_config.rs deleted file mode 100644 index 228760e..0000000 --- a/src/bin/test_config.rs +++ /dev/null @@ -1,54 +0,0 @@ -use aigpt::config::Config; -use anyhow::Result; - -fn main() -> Result<()> { - println!("Testing configuration loading..."); - - // Debug: check which JSON files exist - let possible_paths = vec![ - "../config.json", - "config.json", - "gpt/config.json", - "/Users/syui/ai/ai/gpt/config.json", - ]; - - println!("Checking for config.json files:"); - for path in &possible_paths { - let path_buf = std::path::PathBuf::from(path); - if path_buf.exists() { - println!(" ✓ Found: {}", path); - } else { - println!(" ✗ Not found: {}", path); - } - } - - // Load configuration - let config = Config::new(None)?; - - println!("Configuration loaded successfully!"); - println!("Default provider: {}", config.default_provider); - println!("Available providers:"); - for (name, provider) in &config.providers { - println!(" - {}: model={}, host={:?}", - name, - provider.default_model, - provider.host); - } - - if let Some(mcp) = &config.mcp { - println!("\nMCP Configuration:"); - println!(" Enabled: {}", mcp.enabled); - println!(" Auto-detect: {}", mcp.auto_detect); - println!(" Servers: {}", mcp.servers.len()); - } - - if let Some(atproto) = &config.atproto { - println!("\nATProto Configuration:"); - println!(" Host: {}", atproto.host); - println!(" Handle: {:?}", atproto.handle); - } - - println!("\nConfig file path: {}", config.data_dir.join("config.json").display()); - - Ok(()) -} \ No newline at end of file diff --git a/src/docs.rs b/src/docs.rs index 5e82e28..a17cf3b 100644 --- a/src/docs.rs +++ b/src/docs.rs @@ -575,16 +575,16 @@ impl DocsManager { std::fs::write(&title_path, &home_content)?; println!(" ✓ Updated: {}", "title.md".green()); - // auto/ディレクトリの更新 - let auto_dir = ai_wiki_path.join("auto"); - std::fs::create_dir_all(&auto_dir)?; - + // プロジェクト個別ディレクトリの更新 let projects = self.discover_projects()?; for project in projects { - let auto_content = self.generate_auto_project_content(&project).await?; - let auto_file = auto_dir.join(format!("{}.md", project)); - std::fs::write(&auto_file, auto_content)?; - println!(" ✓ Updated: {}", format!("auto/{}.md", project).green()); + let project_dir = ai_wiki_path.join(&project); + std::fs::create_dir_all(&project_dir)?; + + let project_content = self.generate_auto_project_content(&project).await?; + let project_file = 
+            std::fs::write(&project_file, project_content)?;
+            println!("  ✓ Updated: {}", format!("{}/{}.md", project, project).green());
         }

         println!("{}", "✅ ai.wiki updated successfully".green().bold());
@@ -630,7 +630,7 @@ impl DocsManager {

             if let Some(projects_in_category) = project_sections.get(category) {
                 for (project, info) in projects_in_category {
-                    content.push_str(&format!("#### [{}](auto/{}.md)\n", project, project));
+                    content.push_str(&format!("#### [{}]({}.md)\n", project, project));
                     if !info.description.is_empty() {
                         content.push_str(&format!("- **名前**: ai.{} - **パッケージ**: ai{} - **タイプ**: {} - **役割**: {}\n\n",
@@ -638,14 +638,15 @@ impl DocsManager {
                     }
                     content.push_str(&format!("**Status**: {} \n", info.status));
-                    content.push_str(&format!("**Links**: [Repo](https://git.syui.ai/ai/{}) | [Docs](https://git.syui.ai/ai/{}/src/branch/main/claude.md)\n\n", project, project));
+                    let branch = self.get_project_branch(project);
+                    content.push_str(&format!("**Links**: [Repo](https://git.syui.ai/ai/{}) | [Docs](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));
                 }
             }
         }

         content.push_str("---\n\n");
         content.push_str("## ディレクトリ構成\n\n");
-        content.push_str("- `auto/` - 自動生成されたプロジェクト概要\n");
+        content.push_str("- `{project}/` - プロジェクト個別ドキュメント\n");
         content.push_str("- `claude/` - Claude Code作業記録\n");
         content.push_str("- `manual/` - 手動作成ドキュメント\n\n");
         content.push_str("---\n\n");
@@ -655,7 +656,7 @@ impl DocsManager {
         Ok(content)
     }

-    /// auto/プロジェクトファイルのコンテンツ生成
+    /// プロジェクト個別ファイルのコンテンツ生成
     async fn generate_auto_project_content(&self, project: &str) -> Result<String> {
         let info = self.load_project_info(project).unwrap_or_default();
         let mut content = String::new();
@@ -669,7 +670,8 @@ impl DocsManager {
         content.push_str(&format!("- **タイプ**: {}\n", info.project_type));
         content.push_str(&format!("- **説明**: {}\n", info.description));
         content.push_str(&format!("- **ステータス**: {}\n", info.status));
-        content.push_str("- **ブランチ**: main\n");
+        let branch = self.get_project_branch(project);
+        content.push_str(&format!("- **ブランチ**: {}\n", branch));
         content.push_str("- **最終更新**: Unknown\n\n");

         // プロジェクト固有の機能情報を追加
@@ -684,7 +686,8 @@ impl DocsManager {
         content.push_str("## リンク\n");
         content.push_str(&format!("- **Repository**: https://git.syui.ai/ai/{}\n", project));
         content.push_str(&format!("- **Project Documentation**: [claude/projects/{}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{}.md)\n", project, project));
-        content.push_str(&format!("- **Generated Documentation**: [{}/claude.md](https://git.syui.ai/ai/{}/src/branch/main/claude.md)\n\n", project, project));
+        let branch = self.get_project_branch(project);
+        content.push_str(&format!("- **Generated Documentation**: [{}/claude.md](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));
         content.push_str("---\n");
         content.push_str(&format!("*このページは claude/projects/{}.md から自動生成されました*\n", project));
@@ -761,4 +764,26 @@ impl DocsManager {

         Ok(())
     }
+
+    /// メインai.jsonからプロジェクトのブランチ情報を取得
+    fn get_project_branch(&self, project: &str) -> String {
+        let main_ai_json_path = self.ai_root.join("ai.json");
+
+        if main_ai_json_path.exists() {
+            if let Ok(content) = std::fs::read_to_string(&main_ai_json_path) {
+                if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
+                    if let Some(ai_section) = json_data.get("ai") {
+                        if let Some(project_data) = ai_section.get(project) {
+                            if let Some(branch) = project_data.get("branch").and_then(|v| v.as_str()) {
+                                return branch.to_string();
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        // デフォルトはmain
+        "main".to_string()
+    }
 }
\ No newline at end of file
diff --git a/src/http_client.rs b/src/http_client.rs
index b11efc1..2d02f9c 100644
--- a/src/http_client.rs
+++ b/src/http_client.rs
@@ -1,21 +1,110 @@
 use anyhow::{anyhow, Result};
 use reqwest::Client;
 use serde_json::Value;
+use serde::{Serialize, Deserialize};
 use std::time::Duration;
+use std::collections::HashMap;
+
+/// Service configuration for unified service management
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ServiceConfig {
+    pub base_url: String,
+    pub timeout: Duration,
+    pub health_endpoint: String,
+}
+
+impl Default for ServiceConfig {
+    fn default() -> Self {
+        Self {
+            base_url: "http://localhost:8000".to_string(),
+            timeout: Duration::from_secs(30),
+            health_endpoint: "/health".to_string(),
+        }
+    }
+}

 /// HTTP client for inter-service communication
 pub struct ServiceClient {
     client: Client,
+    service_registry: HashMap<String, ServiceConfig>,
 }

 impl ServiceClient {
     pub fn new() -> Self {
+        Self::with_default_services()
+    }
+
+    /// Create ServiceClient with default ai ecosystem services
+    pub fn with_default_services() -> Self {
         let client = Client::builder()
             .timeout(Duration::from_secs(30))
             .build()
             .expect("Failed to create HTTP client");

-        Self { client }
+        let mut service_registry = HashMap::new();
+
+        // Register default ai ecosystem services
+        service_registry.insert("ai.card".to_string(), ServiceConfig {
+            base_url: "http://localhost:8000".to_string(),
+            timeout: Duration::from_secs(30),
+            health_endpoint: "/health".to_string(),
+        });
+
+        service_registry.insert("ai.log".to_string(), ServiceConfig {
+            base_url: "http://localhost:8002".to_string(),
+            timeout: Duration::from_secs(30),
+            health_endpoint: "/health".to_string(),
+        });
+
+        service_registry.insert("ai.bot".to_string(), ServiceConfig {
+            base_url: "http://localhost:8003".to_string(),
+            timeout: Duration::from_secs(30),
+            health_endpoint: "/health".to_string(),
+        });
+
+        Self { client, service_registry }
+    }
+
+    /// Create ServiceClient with custom service registry
+    pub fn with_services(service_registry: HashMap<String, ServiceConfig>) -> Self {
+        let client = Client::builder()
+            .timeout(Duration::from_secs(30))
+            .build()
+            .expect("Failed to create HTTP client");
+
+        Self { client, service_registry }
+    }
+
+    /// Register a new service configuration
+    pub fn register_service(&mut self, name: String, config: ServiceConfig) {
+        self.service_registry.insert(name, config);
+    }
+
+    /// Get service configuration by name
+    pub fn get_service_config(&self, service: &str) -> Result<&ServiceConfig> {
+        self.service_registry.get(service)
+            .ok_or_else(|| anyhow!("Unknown service: {}", service))
+    }
+
+    /// Universal service method call
+    pub async fn call_service_method<T: Serialize>(
+        &self,
+        service: &str,
+        method: &str,
+        params: &T
+    ) -> Result<Value> {
+        let config = self.get_service_config(service)?;
+        let url = format!("{}/{}", config.base_url.trim_end_matches('/'), method.trim_start_matches('/'));
+
+        self.post_request(&url, &serde_json::to_value(params)?).await
+    }
+
+    /// Universal service GET call
+    pub async fn call_service_get(&self, service: &str, endpoint: &str) -> Result<Value> {
+        let config = self.get_service_config(service)?;
+        let url = format!("{}/{}", config.base_url.trim_end_matches('/'), endpoint.trim_start_matches('/'));
+
+        self.get_request(&url).await
     }

     /// Check if a service is available
@@ -68,18 +157,50 @@ impl ServiceClient {
     /// Get user's card collection from ai.card service
     pub async fn get_user_cards(&self, user_did: &str) -> Result<Value> {
-        let url = format!("http://localhost:8000/api/v1/cards/user/{}", user_did);
-        self.get_request(&url).await
+        let endpoint = format!("api/v1/cards/user/{}", user_did);
+        self.call_service_get("ai.card", &endpoint).await
     }

     /// Draw a card for user from ai.card service
     pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<Value> {
-        let payload = serde_json::json!({
+        let params = serde_json::json!({
             "user_did": user_did,
             "is_paid": is_paid
         });

-        self.post_request("http://localhost:8000/api/v1/cards/draw", &payload).await
+        self.call_service_method("ai.card", "api/v1/cards/draw", &params).await
+    }
+
+    /// Get card statistics from ai.card service
+    pub async fn get_card_stats(&self) -> Result<Value> {
+        self.call_service_get("ai.card", "api/v1/cards/gacha-stats").await
+    }
+
+    // MARK: - ai.log service methods
+
+    /// Create a new blog post
+    pub async fn create_blog_post<T: Serialize>(&self, params: &T) -> Result<Value> {
+        self.call_service_method("ai.log", "api/v1/posts", params).await
+    }
+
+    /// Get list of blog posts
+    pub async fn get_blog_posts(&self) -> Result<Value> {
+        self.call_service_get("ai.log", "api/v1/posts").await
+    }
+
+    /// Build the blog
+    pub async fn build_blog(&self) -> Result<Value> {
+        self.call_service_method("ai.log", "api/v1/build", &serde_json::json!({})).await
+    }
+
+    /// Translate document using ai.log service
+    pub async fn translate_document<T: Serialize>(&self, params: &T) -> Result<Value> {
+        self.call_service_method("ai.log", "api/v1/translate", params).await
+    }
+
+    /// Generate documentation using ai.log service
+    pub async fn generate_docs<T: Serialize>(&self, params: &T) -> Result<Value> {
+        self.call_service_method("ai.log", "api/v1/docs", params).await
     }
 }
diff --git a/src/mcp_server.rs b/src/mcp_server.rs
index 9bdd7e1..d0d0a7d 100644
--- a/src/mcp_server.rs
+++ b/src/mcp_server.rs
@@ -1860,38 +1860,63 @@ async fn execute_command_handler(
     }
 }

-// AI Card proxy handlers (TODO: Fix ServiceClient method visibility)
+// AI Card proxy handlers
 async fn get_user_cards_handler(
-    State(_state): State,
+    State(state): State,
     AxumPath(user_id): AxumPath<String>,
 ) -> Json<MCPHttpResponse> {
-    // TODO: Implement proper ai.card service integration
-    Json(MCPHttpResponse {
-        success: false,
-        result: None,
-        error: Some(format!("AI Card service integration not yet implemented for user: {}", user_id)),
-    })
+    let server = state.lock().await;
+    match server.service_client.get_user_cards(&user_id).await {
+        Ok(cards) => Json(MCPHttpResponse {
+            success: true,
+            result: Some(cards),
+            error: None,
+        }),
+        Err(e) => Json(MCPHttpResponse {
+            success: false,
+            result: None,
+            error: Some(format!("Failed to get user cards: {}", e)),
+        }),
+    }
 }

 async fn draw_card_handler(
-    State(_state): State,
-    Json(_request): Json<MCPHttpRequest>,
+    State(state): State,
+    Json(request): Json<MCPHttpRequest>,
 ) -> Json<MCPHttpResponse> {
-    // TODO: Implement proper ai.card service integration
-    Json(MCPHttpResponse {
-        success: false,
-        result: None,
-        error: Some("AI Card draw service integration not yet implemented".to_string()),
-    })
+    // Extract user_did from user_id field, default is_paid to false for now
+    let user_did = request.user_id.as_deref().unwrap_or("unknown");
+    let is_paid = false; // TODO: Add is_paid field to MCPHttpRequest if needed
+
+    let server = state.lock().await;
+    match server.service_client.draw_card(user_did, is_paid).await {
+        Ok(card) => Json(MCPHttpResponse {
+            success: true,
+            result: Some(card),
+            error: None,
+        }),
+        Err(e) => Json(MCPHttpResponse {
+            success: false,
+            result: None,
+            error: Some(format!("Failed to draw card: {}", e)),
+        }),
+    }
 }

-async fn get_card_stats_handler(State(_state): State) -> Json<MCPHttpResponse> {
-    // TODO: Implement proper ai.card service integration
-    Json(MCPHttpResponse {
-        success: false,
-        result: None,
-        error: Some("AI Card stats service integration not yet implemented".to_string()),
-    })
+async fn get_card_stats_handler(State(state): State) -> Json<MCPHttpResponse> {
+    let server = state.lock().await;
+    match server.service_client.get_card_stats().await {
+        Ok(stats) => Json(MCPHttpResponse {
+            success: true,
+            result: Some(stats),
+            error: None,
+        }),
+        Err(e) => Json(MCPHttpResponse {
+            success: false,
+            result: None,
+            error: Some(format!("Failed to get card stats: {}", e)),
+        }),
+    }
 }

 // AI Log proxy handlers (placeholder - these would need to be implemented)
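The `ServiceClient` rework in src/http_client.rs above replaces hard-coded `http://localhost:<port>` strings with a name-keyed registry: call sites address services by name (`ai.card`, `ai.log`, `ai.bot`) and the URL is assembled from the registered `ServiceConfig`. A minimal usage sketch, assuming the module is exposed as `aigpt::http_client`; the `ai.example` service, its port, and the DID below are hypothetical:

```rust
use std::time::Duration;
use anyhow::Result;
// Assumed module path: src/http_client.rs exported from the aigpt crate.
use aigpt::http_client::{ServiceClient, ServiceConfig};

#[tokio::main]
async fn main() -> Result<()> {
    // Start from the default registry: ai.card (8000), ai.log (8002), ai.bot (8003).
    let mut client = ServiceClient::with_default_services();

    // Register an additional service; name and port are illustrative only.
    client.register_service("ai.example".to_string(), ServiceConfig {
        base_url: "http://localhost:8010".to_string(),
        timeout: Duration::from_secs(10),
        health_endpoint: "/health".to_string(),
    });

    // GET through the registry instead of a hard-coded URL.
    let cards = client
        .call_service_get("ai.card", "api/v1/cards/user/did:plc:example")
        .await?;
    println!("{cards}");

    // POST any serializable payload the same way.
    let drawn = client
        .call_service_method("ai.card", "api/v1/cards/draw", &serde_json::json!({
            "user_did": "did:plc:example",
            "is_paid": false
        }))
        .await?;
    println!("{drawn}");

    Ok(())
}
```

Because `call_service_method` and `call_service_get` join URLs with `trim_end_matches('/')` and `trim_start_matches('/')`, registered `base_url`s and endpoint paths can include or omit slashes freely.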
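Similarly, the new `get_project_branch` helper in src/docs.rs resolves the branch through the lookup path `ai -> <project> -> branch`, which implies an `ai.json` at the ai root shaped roughly as below; the project names and branch values here are illustrative, and any project without a `branch` key falls back to `"main"`:

```json
{
  "ai": {
    "gpt": { "branch": "main" },
    "log": { "branch": "develop" }
  }
}
```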