Compare commits
17 Commits
feature/sh ... 110674659b

| SHA1 |
|---|
| 110674659b |
| 6dadc41da7 |
| 64e519d719 |
| ed6d6e0d47 |
| 582b983a32 |
| b410c83605 |
| 334e17a53e |
| df86fb827e |
| 5a441e847d |
| 948bbc24ea |
| d4de0d4917 |
| 3487535e08 |
| 1755dc2bec |
| 42c85fc820 |
| 4a441279fb |
| e7e57b7b4b |
| 8c0961ab2f |
@@ -42,7 +42,16 @@
     "Bash(echo:*)",
     "Bash(aigpt shell:*)",
     "Bash(aigpt maintenance)",
-    "Bash(aigpt status syui)"
+    "Bash(aigpt status syui)",
+    "Bash(cp:*)",
+    "Bash(./setup_venv.sh:*)",
+    "WebFetch(domain:docs.anthropic.com)",
+    "Bash(launchctl:*)",
+    "Bash(sudo lsof:*)",
+    "Bash(sudo:*)",
+    "Bash(cargo check:*)",
+    "Bash(cargo run:*)",
+    "Bash(cargo test:*)"
     ],
     "deny": []
 }
.gitmodules (vendored, 3 lines changed)
@@ -5,3 +5,6 @@
 path = card
 url = git@git.syui.ai:ai/card
 branch = claude
+[submodule "log"]
+path = log
+url = git@git.syui.ai:ai/log
Cargo.toml (new file, 37 lines)
@@ -0,0 +1,37 @@
[package]
name = "aigpt"
version = "0.1.0"
edition = "2021"
description = "AI.GPT - Autonomous transmission AI with unique personality (Rust implementation)"
authors = ["syui"]

[[bin]]
name = "aigpt"
path = "src/main.rs"

[[bin]]
name = "test-config"
path = "src/bin/test_config.rs"

[dependencies]
clap = { version = "4.0", features = ["derive"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.0", features = ["full"] }
chrono = { version = "0.4", features = ["serde", "std"] }
chrono-tz = "0.8"
uuid = { version = "1.0", features = ["v4"] }
anyhow = "1.0"
colored = "2.0"
dirs = "5.0"
reqwest = { version = "0.11", features = ["json"] }
url = "2.4"
rustyline = "14.0"
axum = "0.7"
tower = "0.4"
tower-http = { version = "0.5", features = ["cors"] }
hyper = "1.0"

# OpenAI API client
async-openai = "0.23"
openai_api_rust = "0.1"
DEVELOPMENT.md (new file, 115 lines)
@@ -0,0 +1,115 @@

# ai.gpt Project-Specific Information

## Project Overview
- **Name**: ai.gpt
- **Package**: aigpt
- **Type**: autonomous transmission AI + unified MCP platform
- **Role**: integrated AI system for memory, relationships, and development support

## Implementation Status

### 🧠 Memory System (MemoryManager)
- **Hierarchical memory**: full log → AI summary → core memory → selective forgetting
- **Contextual search**: keyword and semantic search
- **Memory summarization**: AI-driven automatic summaries

### 🤝 Relationship System (RelationshipTracker)
- **Irreversibility**: carries the same weight as real human relationships
- **Time decay**: relationships change naturally over time
- **Transmission decision**: spontaneous communication gated by a relationship threshold

### 🎭 Personality System (Persona)
- **AI fortune**: daily personality variation driven by a random 1-10 value
- **Unified management**: combined judgment over memory, relationships, and fortune
- **Continuity**: personality carried forward through long-term memory

### 💻 ai.shell Integration (Claude Code features)
- **Interactive environment**: `aigpt shell`
- **Development support**: file analysis, code generation, project management
- **Continuous development**: retains project context

## MCP Server Integration (23 tools)

### 🧠 Memory System (5 tools)
- get_memories, get_contextual_memories, search_memories
- create_summary, create_core_memory

### 🤝 Relationships (4 tools)
- get_relationship, get_all_relationships
- process_interaction, check_transmission_eligibility

### 💻 Shell Integration (5 tools)
- execute_command, analyze_file, write_file
- read_project_file, list_files

### 🔒 Remote Execution (4 tools)
- remote_shell, ai_bot_status
- isolated_python, isolated_analysis

### ⚙️ System State (3 tools)
- get_persona_state, get_fortune, run_maintenance

### 🎴 ai.card Integration (6 tools + standalone MCP server)
- card_draw_card, card_get_user_cards, card_analyze_collection
- **Standalone server**: FastAPI + MCP (port 8000)

### 📝 ai.log Integration (8 tools + Rust server)
- log_create_post, log_ai_content, log_translate_document
- **Standalone server**: written in Rust (port 8002)

## Development Environment & Configuration

### Environment Setup
```bash
cd /Users/syui/ai/gpt
./setup_venv.sh
source ~/.config/syui/ai/gpt/venv/bin/activate
```

### Configuration Management
- **Main config**: `/Users/syui/ai/gpt/config.json`
- **Data directory**: `~/.config/syui/ai/gpt/`
- **Virtual environment**: `~/.config/syui/ai/gpt/venv/`

### Usage
```bash
# Start ai.shell
aigpt shell --model qwen2.5-coder:latest --provider ollama

# Start the MCP server
aigpt server --port 8001

# Try the memory system
aigpt chat syui "your question" --provider ollama --model qwen3:latest
```

## Technical Architecture

### Integrated Structure
```
ai.gpt (unified MCP server :8001)
├── 🧠 ai.gpt core (memory, relationships, personality)
├── 💻 ai.shell (Claude Code-style development environment)
├── 🎴 ai.card (standalone MCP server :8000)
└── 📝 ai.log (Rust blog system :8002)
```

### Future Directions
- **Autonomous transmission**: truly spontaneous communication via an atproto implementation
- **ai.ai integration**: integration with the psychological-analysis AI
- **ai.verse integration**: connection with the UE metaverse
- **Distributed SNS integration**: full atproto support

## Innovative Characteristics

### AI-Driven Memory System
- Effective memory construction learned from 4,000 ChatGPT conversation logs
- Human-like forgetting and importance scoring

### Irreversible Relationships
- AI relationships that carry the same weight as real human relationships
- Broken relationships cannot be repaired

### Integrated Architecture
- Multiple AI systems unified on a fastapi_mcp foundation
- OpenAI Function Calling + MCP integration, verified end to end
@@ -1,365 +0,0 @@

# ai.gpt Development Status (updated 2025/06/02)

## Completed in the Previous Session (2025/06/01)

### ✅ ai.card MCP server made standalone
- **Dedicated ai.card MCP server implemented**: `card/api/app/mcp_server.py`
- **9 MCP tools exposed**: card management, gacha, atproto sync, etc.
- **Integration strategy changed**: ai.gpt is the unified server; ai.card is a standalone server
- **Virtual environment setup**: `~/.config/syui/ai/card/venv/`
- **Startup script**: `uvicorn app.main:app --port 8000`

### ✅ ai.shell integration completed
- **Claude Code-style shell implemented**: the `aigpt shell` command
- **MCP integration strengthened**: 14 tools (ai.gpt: 9, ai.shell: 5)
- **Project specification**: `aishell.md` loading feature
- **Improved environment handling**: falls back to input() when prompt-toolkit is unavailable

### ✅ Bug fixes from the previous session completed
- **config list bug fixed**: corrected the `config.list_keys()` method call
- **Virtual environment issue resolved**: editable mode established with `pip install -e .`
- **All CLI commands verified working**

## Current State

### ✅ Implemented Features

1. **Core system**
   - Hierarchical memory system (full log → summary → core → forgetting)
   - Irreversible relationship system (the broken state cannot be repaired)
   - Daily personality variation via AI fortune
   - Natural relationship change through time decay

2. **CLI features**
   - `chat` - converse with the AI (Ollama/OpenAI supported)
   - `status` - check state
   - `fortune` - check the AI fortune
   - `relationships` - list relationships
   - `transmit` - transmission check (currently prints to console)
   - `maintenance` - daily maintenance
   - `config` - configuration management (list bug fixed)
   - `schedule` - scheduler management
   - `server` - start the MCP server
   - `shell` - interactive shell (ai.shell integration)

3. **Data management**
   - Storage location: `~/.config/syui/ai/gpt/` (unified naming convention)
   - Config: `config.json`
   - Data: JSON files under the `data/` directory
   - Virtual environment: `~/.config/syui/ai/gpt/venv/`

4. **Scheduler**
   - Supports cron and interval formats
   - 5 task types implemented
   - Can run in the background

5. **MCP server integration architecture**
   - **ai.gpt unified server**: 14 tools (port 8001)
   - **ai.card standalone server**: 9 tools (port 8000)
   - Claude Desktop/Cursor integration supported
   - Unified fastapi_mcp foundation

6. **ai.shell integration (Claude Code style)**
   - Interactive shell mode
   - Shell command execution (!command form)
   - AI commands (analyze, generate, explain)
   - aishell.md loading
   - Environment-adaptive prompt (prompt-toolkit/input())

## 🚧 Priorities for the Next Session

### Top priority: optimize system integration

1. **Remove duplicated ai.card code**
   - **To delete**: `src/aigpt/card_integration.py` (HTTP client)
   - **To delete**: the `--enable-card` option of ai.gpt's MCP server
   - **Reason**: no longer needed now that ai.card is a standalone MCP server
   - **Integration path**: ai.gpt (8001) → ai.card (8000) over HTTP

2. **Implement autonomous transmission**
   - Current: prints to the console
   - TODO: actually post to atproto (Bluesky)
   - Reference: also consider integrating with ai.bot (Rust/seahorse)

3. **Automate environment setup**
   - Strengthen the virtual-environment bootstrap script
   - Automatic dependency resolution
   - Provide Claude Desktop configuration examples

### Mid-term tasks

1. **Add tests**
   - Unit tests
   - Integration tests
   - CI/CD pipeline

2. **Improve error handling**
   - More detailed error messages
   - Retry mechanism

3. **Integrate with ai.bot**
   - Create the Rust-side API endpoints
   - Delegate the transmission feature

4. **More advanced memory summarization**
   - Current: simple summaries
   - TODO: AI-driven semantic summaries

5. **Web dashboard**
   - Relationship visualization
   - Memory management UI

### Long-term tasks

1. **Integration with other syui projects**
   - ai.card: card-game integration
   - ai.verse: NPC personalities inside the metaverse
   - ai.os: system-level integration

2. **Decentralization**
   - Store data on atproto
   - Fully realize user data sovereignty

## Entry Points for the Next Session

### 🎯 Top priority: remove ai.card duplication
```bash
# 1. Confirm the standalone ai.card server starts
cd /Users/syui/ai/gpt/card/api
source ~/.config/syui/ai/card/venv/bin/activate
uvicorn app.main:app --port 8000

# 2. Remove the duplicated functionality from ai.gpt
rm src/aigpt/card_integration.py
# Remove the --enable-card option from mcp_server.py

# 3. Integration test
aigpt server --port 8001                       # ai.gpt unified server
curl "http://localhost:8001/get_memories"      # check ai.gpt
curl "http://localhost:8000/get_gacha_stats"   # check ai.card
```

### 1. To implement autonomous transmission
```python
# Edit src/aigpt/transmission.py
# Add the atproto-python library
# Update the _handle_transmission_check() method
```

### 2. To integrate with ai.bot
```python
# New file: src/aigpt/bot_connector.py
# Send HTTP requests to ai.bot's API endpoints
```

### 3. To add tests
```bash
# Create the tests/ directory
# Add pytest configuration
```

### 4. To automate environment setup
```bash
# Strengthen setup_venv.sh
# Add Claude Desktop configuration examples under docs/
```

## Key Design Principles (for AI readers)

1. **Uniqueness (yui system)**: each user-AI relationship is 1:1 and cannot be altered
2. **Irreversibility**: a destroyed relationship cannot be repaired (just like real human relationships)
3. **Hierarchical memory**: not a plain log, but a pipeline of summarization, core selection, and forgetting
4. **Environmental influence**: daily personality variation via AI fortune (not fixed)
5. **Staged implementation**: first CLI print → atproto posting → ai.bot integration

## Current Architecture (for the next AI)

### System layout
```
Claude Desktop/Cursor
    ↓
ai.gpt MCP (port 8001) ←-- unified server (14 tools)
    ├── ai.gpt features: memory, relationships, personality (9 tools)
    ├── ai.shell features: shell and file operations (5 tools)
    └── HTTP client → ai.card MCP (port 8000)
                          ↓
                    ai.card standalone server (9 tools)
                    ├── card management & gacha
                    ├── atproto sync
                    └── PostgreSQL/SQLite
```

### Tech stack
- **Language**: Python (typer CLI, fastapi_mcp)
- **AI integration**: Ollama (qwen2.5) / OpenAI API
- **Data format**: JSON (SQLite under consideration)
- **Auth**: atproto DID (designed; implementation pending)
- **MCP integration**: unified fastapi_mcp foundation
- **Virtual environments**: `~/.config/syui/ai/{gpt,card}/venv/`

### Naming conventions (important)
- **Package**: `aigpt`
- **Commands**: `aigpt shell`, `aigpt server`
- **Directory**: `~/.config/syui/ai/gpt/`
- **Domain**: `ai.gpt`

### Getting started immediately
```bash
# 1. Check the environment
cd /Users/syui/ai/gpt
source ~/.config/syui/ai/gpt/venv/bin/activate
aigpt --help

# 2. Review the previous session's results
aigpt config list
aigpt shell  # Claude Code-style environment

# 3. Details
cat docs/ai_card_mcp_integration_summary.md
cat docs/ai_shell_integration_summary.md
```

Referencing this file lets the next session start quickly with a full understanding of the previous work.

## Completed in the Current Session (2025/06/02)

### ✅ Major memory-system improvements completed

Continued the ChatGPT log-analysis work that the previous API error interrupted, and fully redesigned and reimplemented the memory system.

#### Newly implemented features:

1. **Smart summary generation (`create_smart_summary`)**
   - AI-driven, theme-based memory summaries
   - Analysis of conversation patterns, technical topics, and relationship progression
   - Saved with metadata (period, theme, memory count)
   - A fallback keeps it working when no AI is available

2. **Core memory analysis (`create_core_memory`)**
   - Analyzes all memories to extract personality-forming elements
   - Identifies the user's characteristic communication style
   - Deep analysis of problem-solving patterns and interests
   - Persistent, essential relationship memories

3. **Hierarchical memory search (`get_contextual_memories`)**
   - Priority-ordered search: CORE → SUMMARY → RECENT
   - Keyword-based relevance scoring (see the sketch after this list)
   - Dynamic memory weighting based on the query
   - Returns structured memory groups

4. **Advanced memory search (`search_memories`)**
   - Full-text search with multiple keywords
   - Filtering by memory level
   - Results returned with match scores

5. **Context-aware AI responses**
   - `build_context_prompt`: generates context prompts grounded in memory
   - Responses that integrate personality state, mood, and fortune
   - Consistent conversation that always references CORE memories

6. **MCP server extension**
   - All new features are available via the MCP API
   - `/get_contextual_memories` - contextual memory retrieval
   - `/search_memories` - memory search
   - `/create_summary` - AI summary generation
   - `/create_core_memory` - core memory analysis
   - `/get_context_prompt` - context prompt generation

7. **Model extension**
   - Added a `metadata` field to the `Memory` model
   - Full support for the hierarchical memory structure
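As a rough illustration of the keyword scoring and CORE → SUMMARY → RECENT prioritization described above, here is a minimal sketch; the level weights and the scoring formula are assumptions, not the actual implementation:

```python
from dataclasses import dataclass

# Assumed priority weights for the three memory levels (hypothetical values).
LEVEL_WEIGHTS = {"CORE": 3.0, "SUMMARY": 2.0, "RECENT": 1.0}

@dataclass
class ScoredMemory:
    content: str
    level: str  # "CORE", "SUMMARY", or "RECENT"

def relevance(memory: ScoredMemory, keywords: list[str]) -> float:
    """Score a memory by keyword hits, boosted by its hierarchy level."""
    text = memory.content.lower()
    hits = sum(1 for kw in keywords if kw.lower() in text)
    return hits * LEVEL_WEIGHTS.get(memory.level, 1.0)

def get_contextual_memories(memories: list[ScoredMemory], query: str,
                            limit: int = 5) -> list[ScoredMemory]:
    """Rank memories against a free-text query and keep the top matches."""
    keywords = query.split()
    scored = [(relevance(m, keywords), m) for m in memories]
    ranked = [m for score, m in sorted(scored, key=lambda x: -x[0]) if score > 0]
    return ranked[:limit]
```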
#### Technical highlights:
- **AI integration**: intelligent analysis with both ollama and OpenAI
- **Fallback**: basic functionality works without AI
- **Pattern analysis**: automatic classification and analysis of user behavior
- **Relevance scores**: numeric relevance to the query
- **Time-series analysis**: accounts for how memories evolve over time

#### Realizing the earlier discussion:
Fully implemented the insights gained from analyzing 4,000 ChatGPT log entries:
- Hierarchical memory (FULL_LOG → SUMMARY → CORE)
- Context-aware memory (remembering the flow of conversation)
- Memory of emotions and relationships (tracking change patterns)
- Practical memory categories (user traits, effective responses, failure avoidance)

### ✅ Additional items completed (same day)

**Improved environment-variable handling**:
- Automatic loading of the `OLLAMA_HOST` environment variable
- Environment-variable precedence implemented in ai_provider.py
- Layered configuration: config file → environment variable → default (sketched below)
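The layered lookup could look something like this minimal sketch; the key names and file path are illustrative, not the actual ai_provider.py code:

```python
import json
import os
from pathlib import Path

# Assumed config location, following the project's naming convention.
CONFIG_PATH = Path.home() / ".config" / "syui" / "ai" / "gpt" / "config.json"

def resolve_ollama_host(default: str = "http://127.0.0.1:11434") -> str:
    """Resolve the Ollama host: config file first, then OLLAMA_HOST, then default."""
    if CONFIG_PATH.exists():
        config = json.loads(CONFIG_PATH.read_text())
        host = config.get("providers", {}).get("ollama", {}).get("host")
        if host:
            return host
    return os.environ.get("OLLAMA_HOST", default)
```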
**Memory system fully verified**:
- Successful ollama integration (verified with gemma3:4b)
- Contextual memory search confirmed working
- Memory references from imported ChatGPT logs confirmed
- Personality, mood, and fortune reflected in AI responses

### 🚧 Issues for next time
- Resolve the problem where the OLLAMA_HOST environment variable is not fully applied
- Fix the MCP server error (Internal Server Error)
- Finish testing with qwen3:latest
- AI-integration tests for the memory core features (smart summaries, core memory analysis)

## Completed in the Current Session (2025/06/03, continued session)

### ✅ **Work resumed after the previous API error**

The previous session ended with an API error; this one continued normally and completed the following:

#### 🔧 **Important bug fix**
- **Memory model validation error fixed**: resolved a floating-point precision issue in `importance_score`
  - Problem: tiny negative values such as `-5.551115123125783e-17` triggered validation errors
  - Fix: clamp tiny values to 0.0 in a field validator and drop the Field constraint
  - Result: memory loading and all CLI features work normally
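The clamp described above might look like the following sketch, assuming a Pydantic v2 model; the validator body is illustrative, not the project's actual code:

```python
from pydantic import BaseModel, field_validator

class Memory(BaseModel):
    content: str
    importance_score: float = 0.0  # no Field(ge=...) constraint, per the fix

    @field_validator("importance_score")
    @classmethod
    def clamp_importance(cls, v: float) -> float:
        # Tiny float residue (e.g. -5.551115123125783e-17) previously failed
        # validation; clamp near-zero values to exactly 0.0 instead.
        return 0.0 if abs(v) < 1e-9 else v
```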
#### 🧪 **System verification completed**
- **ai.gpt CLI**: all commands confirmed working
- **Memory system**: hierarchical memory (CORE → SUMMARY → RECENT) fully working
- **Relationship progression**: the relationship with syui progressed normally from 17.50 → 19.00
- **MCP server**: 17 tools served normally (port 8001)
- **Hierarchical memory API**: `/get_contextual_memories` works for a blog query

#### 💾 **Memory system snapshot**
- **CORE memories**: important patterns such as blog development and technical discussions stored
- **SUMMARY memories**: theme-based summaries such as AI×MCP and a Qwen3 explainer
- **RECENT memories**: latest memory-system test history
- **Contextual search**: keyword-based relevance scoring confirmed

#### 🌐 **Environment issues and mitigations**
- **ollama connection**: the OLLAMA_HOST environment variable is set correctly (http://192.168.11.95:11434)
- **AI integration issue**: qwen3:latest timeouts → the memory system alone works fine
- **Fallback**: memory-based responses keep things running without AI

#### 🚀 **ai.bot integration completed (added the same day)**
- **MCP integration extended**: 17 → 23 tools (6 new tools)
- **Remote execution**: systemd-nspawn isolated-environment integration
  - `remote_shell`: full integration with ai.bot's /sh feature
  - `ai_bot_status`: server status check and container info
  - `isolated_python`: isolated Python execution environment
  - `isolated_analysis`: secure file analysis
- **ai.shell extended**: 3 new commands
  - `remote <command>`: run a command in the isolated container
  - `isolated <code>`: isolated Python execution
  - `aibot-status`: check the ai.bot server connection
- **Fully verified**: help output, command completion, and error handling

#### 🏗️ **Updated integration architecture**
```
Claude Desktop/Cursor → ai.gpt MCP (port 8001, 23 tools)
    ├── ai.gpt: memory, relationships, personality (9 tools)
    ├── ai.memory: hierarchical memory & contextual search (5 tools)
    ├── ai.shell: shell & file operations (5 tools)
    ├── ai.bot integration: remote execution & isolation (4 tools)
    └── ai.card integration: HTTP client → port 8000 (9 tools)
```

#### 📋 **Recommendations for the next session**
1. **Real ai.bot server**: start the actual ai.bot server and test the integration
2. **Isolation in practice**: validate practicality in the systemd-nspawn environment
3. **Optimize the ollama connection**: investigate and resolve the timeout issue
4. **AI summary features**: test smart summaries and core-memory generation in maintenance
5. **Security hardening**: permission control and sandbox validation for isolated execution
README.md (800 lines changed)
@@ -1,727 +1,115 @@

# ai.gpt - AI-Driven Memory System & Autonomous Conversational AI
# ai.gpt Project-Specific Information

🧠 **Innovative memory system** × 🤖 **Autonomous AI personality** × 🔗 **atproto integration**

## Project Overview
- **Name**: ai.gpt
- **Package**: aigpt
- **Type**: autonomous transmission AI + unified MCP platform
- **Role**: integrated AI system for memory, relationships, and development support

An AI system with true memory, fully implementing the "effective memory construction" learned from 4,000 ChatGPT conversation logs.

## Implementation Status

## 🎯 Core Features

### 🧠 Memory System (MemoryManager)
- **Hierarchical memory**: full log → AI summary → core memory → selective forgetting
- **Contextual search**: keyword and semantic search
- **Memory summarization**: AI-driven automatic summaries

### 📚 AI-Driven Hierarchical Memory System
- **CORE memory**: persistent memory of personality-forming elements (analyzed and extracted automatically by AI)
- **SUMMARY memory**: smart theme-based summaries (AI-driven pattern analysis)
- **Memory search**: context-aware relevance scoring
- **Selective forgetting**: natural memory decay based on importance

### 🤝 Relationship System (RelationshipTracker)
- **Irreversibility**: carries the same weight as real human relationships
- **Time decay**: relationships change naturally over time
- **Transmission decision**: spontaneous communication gated by a relationship threshold

### 🤝 Evolving Relationship System
- **Uniqueness**: bound 1:1 to an atproto DID; the personality cannot be altered
- **Irreversibility**: once a relationship breaks, it cannot be repaired (just like real human relationships)
- **Time decay**: natural relationship change with a transmission-threshold system
- **AI fortune**: daily personality variation via a random 1-10 value

### 🎭 Personality System (Persona)
- **AI fortune**: daily personality variation driven by a random 1-10 value
- **Unified management**: combined judgment over memory, relationships, and fortune
- **Continuity**: personality carried forward through long-term memory

### 🧬 Integrated Architecture
- **Unified fastapi-mcp foundation**: full Claude Desktop/Cursor support
- **23 MCP tools**: memory, relationships, AI integration, shell operations, remote execution
- **ai.shell integration**: Claude Code-style interactive development environment
- **ai.bot integration**: systemd-nspawn isolated execution environment
- **Multi-AI support**: ollama (qwen3/gemma3) + OpenAI

### 💻 ai.shell Integration (Claude Code features)
- **Interactive environment**: `aigpt shell`
- **Development support**: file analysis, code generation, project management
- **Continuous development**: retains project context

## 🚀 Quick Start

## MCP Server Integration (23 tools)

### Experience the memory system in one minute

### 🧠 Memory System (5 tools)
- get_memories, get_contextual_memories, search_memories
- create_summary, create_core_memory

### 🤝 Relationships (4 tools)
- get_relationship, get_all_relationships
- process_interaction, check_transmission_eligibility

### 💻 Shell Integration (5 tools)
- execute_command, analyze_file, write_file
- read_project_file, list_files

### 🔒 Remote Execution (4 tools)
- remote_shell, ai_bot_status
- isolated_python, isolated_analysis

### ⚙️ System State (3 tools)
- get_persona_state, get_fortune, run_maintenance

### 🎴 ai.card Integration (6 tools + standalone MCP server)
- card_draw_card, card_get_user_cards, card_analyze_collection
- **Standalone server**: FastAPI + MCP (port 8000)

### 📝 ai.log Integration (8 tools + Rust server)
- log_create_post, log_ai_content, log_translate_document
- **Standalone server**: written in Rust (port 8002)

## Development Environment & Configuration

### Environment Setup
```bash
# 1. Setup (automated)
cd /Users/syui/ai/gpt
./setup_venv.sh

# 2. Memory test with ollama + qwen3
aigpt chat syui "This is a memory system test" --provider ollama --model qwen3:latest

# 3. Check the memories
aigpt status syui

# 4. Try the interactive shell
aigpt shell
```

### Memory System Demo

```bash
# Import ChatGPT logs (uses existing data)
aigpt import-chatgpt ./json/chatgpt.json --user-id syui

# AI memory analysis
aigpt maintenance  # smart summaries + core memory generation

# Memory-grounded conversation
aigpt chat syui "Do you remember our last discussion?" --provider ollama --model qwen3:latest

# Memory search
# Contextual memory retrieval via the MCP server
aigpt server --port 8001 &
curl "http://localhost:8001/get_contextual_memories?query=ai&limit=5"
```

## Installation

```bash
# Virtual environment setup (recommended)
cd /Users/syui/ai/gpt
source ~/.config/syui/ai/gpt/venv/bin/activate
pip install -e .

# Or automated setup
./setup_venv.sh
```

## Configuration

### Configuration Management
- **Main config**: `/Users/syui/ai/gpt/config.json`
- **Data directory**: `~/.config/syui/ai/gpt/`
- **Virtual environment**: `~/.config/syui/ai/gpt/venv/`

### API Keys

### Usage
```bash
# OpenAI API key
aigpt config set providers.openai.api_key sk-xxxxx

# atproto credentials (for future automatic posting)
aigpt config set atproto.handle your.handle
aigpt config set atproto.password your-password

# List the current settings
aigpt config list
```

### Data Locations
- Config: `~/.config/syui/ai/gpt/config.json`
- Data: `~/.config/syui/ai/gpt/data/`
- Virtual environment: `~/.config/syui/ai/gpt/venv/`

## Usage

### Chat
```bash
aigpt chat "did:plc:xxxxx" "Hello, how are you feeling today?"
```

### Check Status
```bash
# Overall AI state
aigpt status

# Relationship with a specific user
aigpt status "did:plc:xxxxx"
```

### Today's Fortune
```bash
aigpt fortune
```

### Autonomous Transmission Check
```bash
# Dry run (check only)
aigpt transmit

# Execute
aigpt transmit --execute
```

### Daily Maintenance
```bash
aigpt maintenance
```

### List Relationships
```bash
aigpt relationships
```

### ChatGPT Data Import
```bash
# Import a ChatGPT conversation history
aigpt import-chatgpt ./json/chatgpt.json --user-id "your_user_id"

# Verify after the import
aigpt status
aigpt relationships
```

## Data Layout

By default, the following files are stored under `~/.config/syui/ai/gpt/`:

- `memories.json` - conversation memories
- `conversations.json` - conversation logs
- `relationships.json` - relationship parameters
- `fortunes.json` - AI fortune history
- `transmissions.json` - transmission history
- `persona_state.json` - personality state

## How Relationships Work

- The score ranges from 0 to 200
- Transmission unlocks above 100
- Natural decay over time
- A strongly negative interaction can destroy the relationship (see the sketch below)
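A minimal sketch of these mechanics; the decay rate and the destructive-interaction threshold are assumptions, and the real RelationshipTracker logic is more involved:

```python
from dataclasses import dataclass

TRANSMISSION_THRESHOLD = 100.0  # transmission unlocks above this score
MAX_SCORE = 200.0

@dataclass
class Relationship:
    score: float = 0.0
    broken: bool = False  # irreversible once set

    def apply_interaction(self, delta: float) -> None:
        if self.broken:
            return  # a broken relationship never recovers
        if delta < -50.0:  # assumed threshold for a destructive interaction
            self.broken = True
            self.score = 0.0
            return
        self.score = max(0.0, min(MAX_SCORE, self.score + delta))

    def decay(self, days: float, rate: float = 0.1) -> None:
        # Natural time decay: the score drifts toward zero as days pass.
        self.score = max(0.0, self.score - rate * days)

    @property
    def can_transmit(self) -> bool:
        return not self.broken and self.score > TRANSMISSION_THRESHOLD
```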
## 🖥️ ai.shell Integration - Claude Code-Style Development Environment

### 🚀 **Basic Startup**
```bash
# Default (uses qwen2.5)
aigpt shell

# Use qwen2.5-coder (best for code generation)
# Start ai.shell
aigpt shell --model qwen2.5-coder:latest --provider ollama

# Use qwen3 (advanced dialogue)
aigpt shell --model qwen3:latest --provider ollama

# Use OpenAI
aigpt shell --model gpt-4o-mini --provider openai
```

### 📋 **Available Commands**
```bash
# === Project management ===
load                    # load aishell.md (the AI learns the project)
status                  # AI state and relationship check
fortune                 # AI fortune check (affects the personality)
relationships           # list all relationships

# === AI development support ===
analyze <file>          # file analysis / code review
generate <description>  # code generation (qwen2.5-coder recommended)
explain <topic>         # explain a concept or technology

# === Shell operations ===
!<command>              # run a shell command
!git status             # git operations
!ls -la                 # check files
!mkdir project          # create a directory
!pytest tests/          # run tests

# === Remote execution (ai.bot integration) ===
remote <command>        # run a command in a systemd-nspawn isolated container
isolated <code>         # isolated Python execution
aibot-status            # check the ai.bot server connection

# === Interactive dialogue ===
help                    # command list
clear                   # clear the screen
exit/quit               # quit
<any message>           # free-form AI conversation
```

### 🎯 **Command Examples**
```bash
ai.shell> load
# → loads aishell.md; the AI memorizes the project goals

ai.shell> generate Python FastAPI CRUD for User model
# → generates complete CRUD API code

ai.shell> analyze src/main.py
# → analyzes code quality and points for improvement

ai.shell> !git log --oneline -5
# → shows the recent commit history

ai.shell> remote ls -la /tmp
# → checks a directory inside the ai.bot isolated container

ai.shell> isolated print("Hello from isolated environment!")
# → Hello World via isolated Python execution

ai.shell> aibot-status
# → checks the ai.bot server connection and container info

ai.shell> Please improve the security of this API
# → concrete, memory-grounded security suggestions

ai.shell> explain async/await in Python
# → detailed explanation of asynchronous programming
```

## MCP Server Integration Architecture

### ai.gpt Unified Server
```bash
# Start the ai.gpt unified server (port 8001)
aigpt server --model qwen2.5 --provider ollama --port 8001

# Use OpenAI
aigpt server --model gpt-4o-mini --provider openai --port 8001
```

### ai.card Standalone Server
```bash
# Start the ai.card standalone server (port 8000)
cd card/api
source ~/.config/syui/ai/card/venv/bin/activate
uvicorn app.main:app --port 8000
```

### ai.bot Connection (remote execution environment)
```bash
# Start ai.bot (port 8080, separate setup required)
# Commands run inside a systemd-nspawn isolated container
```

### Architecture Layout
```
Claude Desktop/Cursor
    ↓
ai.gpt unified server (port 8001) ← 23 tools
    ├── ai.gpt features: memory, relationships, personality (9 tools)
    ├── ai.shell features: shell and file operations (5 tools)
    ├── ai.memory features: hierarchical memory & contextual search (5 tools)
    ├── ai.bot integration: remote execution & isolation (4 tools)
    └── HTTP client → ai.card standalone server (port 8000)
                          ↓
                    ai.card-specific tools (9 tools)
                    ├── card management & gacha
                    ├── atproto sync
                    └── PostgreSQL/SQLite

ai.gpt unified server → ai.bot (port 8080)
    ↓
systemd-nspawn container
    ├── isolated Arch Linux environment
    ├── SSH server
    └── secure command execution
```

### Conversations via an AI Provider
```bash
# Chat with Ollama
aigpt chat "did:plc:xxxxx" "Hello" --provider ollama --model qwen2.5

# Chat with OpenAI
aigpt chat "did:plc:xxxxx" "How are you today?" --provider openai --model gpt-4o-mini
```

### MCP Tools

Once the server is running, the following tools become available to the AI:

**ai.gpt tools (9):**
- `get_memories` - fetch active memories
- `get_relationship` - fetch the relationship with a specific user
- `get_all_relationships` - fetch all relationships
- `get_persona_state` - fetch the current personality state
- `process_interaction` - process an interaction with a user
- `check_transmission_eligibility` - check whether transmission is allowed
- `get_fortune` - fetch today's fortune
- `summarize_memories` - summarize memories
- `run_maintenance` - run maintenance

**ai.memory tools (5):**
- `get_contextual_memories` - contextual memory search
- `search_memories` - keyword memory search
- `create_summary` - AI-driven memory summarization
- `create_core_memory` - core memory analysis and extraction
- `get_context_prompt` - memory-based context prompt

**ai.shell tools (5):**
- `execute_command` - run a shell command
- `analyze_file` - AI file analysis
- `write_file` - write a file
- `read_project_file` - read a project file
- `list_files` - list files

**ai.bot integration tools (4):**
- `remote_shell` - run a command in the isolated container
- `ai_bot_status` - check the ai.bot server status
- `isolated_python` - isolated Python execution
- `isolated_analysis` - file analysis (isolated environment)

### Working with the ai.card Standalone Server

ai.card runs as an independent MCP server:
- **Port**: 8000
- **9 MCP tools**: card management, gacha, atproto sync, etc.
- **Database**: PostgreSQL/SQLite
- **Startup**: `uvicorn app.main:app --port 8000`

The ai.gpt server can integrate with it over HTTP.

## Environment Variables

Create a `.env` file and configure it:

```bash
cp .env.example .env
# set the OpenAI API key
```

## Scheduler

### Adding Tasks

```bash
# Transmission check every 6 hours
aigpt schedule add transmission_check "0 */6 * * *" --provider ollama --model qwen2.5

# Transmission check every 30 minutes (interval form)
aigpt schedule add transmission_check "30m"

# Maintenance every day at 3 a.m.
aigpt schedule add maintenance "0 3 * * *"

# Relationship decay every hour
aigpt schedule add relationship_decay "1h"

# Memory summary every Monday
aigpt schedule add memory_summary "0 0 * * MON"
```

### Managing Tasks

```bash
# List tasks
aigpt schedule list

# Disable a task
aigpt schedule disable --task-id transmission_check_1234567890

# Enable a task
aigpt schedule enable --task-id transmission_check_1234567890

# Remove a task
aigpt schedule remove --task-id transmission_check_1234567890
```

### Running the Scheduler Daemon

```bash
# Run the scheduler in the background
aigpt schedule run
```

### Schedule Formats

**Cron form**:
- `"0 */6 * * *"` - every 6 hours
- `"0 0 * * *"` - every day at midnight
- `"*/5 * * * *"` - every 5 minutes

**Interval form**:
- `"30s"` - every 30 seconds
- `"5m"` - every 5 minutes
- `"2h"` - every 2 hours
- `"1d"` - every day
### Task Types

- `transmission_check` - check for transmittable users and send automatically
- `maintenance` - daily maintenance (forgetting, core-memory selection, etc.)
- `fortune_update` - update the AI fortune
- `relationship_decay` - time decay of relationships
- `memory_summary` - create memory summaries

## 🚀 Latest Features (major update completed 2025/06/02)

### ✅ **Innovative memory system completed**
#### 🧠 AI-driven memory features
- **Smart summary generation**: AI theme-based memory summaries (`create_smart_summary`)
- **Core memory analysis**: automatic extraction of personality-forming elements (`create_core_memory`)
- **Hierarchical memory search**: CORE → SUMMARY → RECENT priority system
- **Context awareness**: query-based relevance scoring
- **Context prompts**: consistent, memory-grounded dialogue generation

#### 🔗 Fully integrated architecture
- **ChatGPT import**: memory construction proven on 4,000 log entries
- **Multi-AI support**: full ollama (qwen3:latest/gemma3:4b) + OpenAI integration
- **Environment variables**: automatic `OLLAMA_HOST` loading
- **MCP integration**: 23 tools (5 memory + 4 relationship + 3 AI + 5 shell + 4 ai.bot + 2 item management)

#### 🧬 Verified in practice
- **Memory reference**: contextual use of memories from ChatGPT logs
- **Personality integration**: responses grounded in mood, fortune, and memory
- **Relationship progression**: staged trust building based on memory
- **AI collaboration**: memory system fully working with qwen3

### 🎯 **New MCP tools**
```bash
# New memory-system tools
curl "http://localhost:8001/get_contextual_memories?query=programming&limit=5"
curl "http://localhost:8001/search_memories" -d '{"keywords":["memory","AI"]}'
curl "http://localhost:8001/create_summary" -d '{"user_id":"syui"}'
curl "http://localhost:8001/create_core_memory" -d '{}'
curl "http://localhost:8001/get_context_prompt" -d '{"user_id":"syui","message":"test"}'
```
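The same endpoints can also be called from Python. A minimal sketch using httpx, with query and body shapes inferred from the curl examples above:

```python
import httpx

BASE_URL = "http://localhost:8001"

def get_contextual_memories(query: str, limit: int = 5) -> dict:
    """GET with query parameters, mirroring the first curl example."""
    resp = httpx.get(
        f"{BASE_URL}/get_contextual_memories",
        params={"query": query, "limit": limit},
    )
    resp.raise_for_status()
    return resp.json()

def search_memories(keywords: list[str]) -> dict:
    """POST with a JSON body, mirroring the search_memories curl example."""
    resp = httpx.post(f"{BASE_URL}/search_memories", json={"keywords": keywords})
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    print(get_contextual_memories("programming"))
```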
### 🧪 **Memory tests with the AI**
```bash
# Memory-system test with qwen3
aigpt chat syui "Do you remember our previous conversation?" --provider ollama --model qwen3:latest

# Memory-grounded smart summary generation
aigpt maintenance  # runs the AI summary automatically

# Context search test
aigpt chat syui "Tell me about the memory system" --provider ollama --model qwen3:latest
```

## 🔥 **NEW: Claude Code-Style Continuous Development** (completed 2025/06/03)

### 🚀 **Project management system fully implemented**
ai.shell now has true Claude Code-style continuous development features:

#### 📊 **Project analysis**
```bash
ai.shell> project-status
# ✓ automatic project structure analysis
# Language: Python, Framework: FastAPI
# 1268 classes, 5656 functions, 22 API endpoints, 129 async functions
# detected 57 changed files

ai.shell> suggest-next
# ✓ AI-driven development suggestions
# 1. Continuous unit and integration testing
# 2. Security hardening of the API endpoints
# 3. Database optimization and caching strategy
```

#### 🧠 **Context-aware development**
```bash
ai.shell> continuous
# ✓ continuous development mode started
# project context loaded: 21,986 characters
# parses claude.md + aishell.md + pyproject.toml + dependencies
# the AI assists with the whole project in mind

ai.shell> analyze src/aigpt/project_manager.py
# ✓ file analysis that accounts for the project context
# - code quality assessment
# - consistency check against the project
# - improvement suggestions and potential issues

ai.shell> generate Create a test function for ContinuousDeveloper
# ✓ context-aware code generation
# automatically matches FastAPI, Python, and existing patterns
```

#### 🛠️ **Implementation details**
- **ProjectState**: file-change detection and project state tracking
- **ContinuousDeveloper**: AI-driven project analysis, suggestions, and code generation
- **Project context**: automatically loads claude.md, aishell.md, pyproject.toml, etc.
- **Language detection**: automatic detection of Python/JavaScript/Rust and more
- **Framework analysis**: dependency detection for FastAPI/Django/React and more
- **Code patterns**: learns and applies existing design patterns

#### ✅ **Verified features**
- ✓ project structure analysis (Language: Python, Framework: FastAPI)
- ✓ file-change detection (57 changes detected)
- ✓ project context loading (21,986 characters)
- ✓ AI-driven suggestions (concrete next steps)
- ✓ context-aware file analysis (code quality and consistency)
- ✓ context-aware code generation (FastAPI-conformant code)

### 🎯 **Claude Code-style workflow**
```bash
# 1. Understand the project
aigpt shell --model qwen2.5-coder:latest --provider ollama
ai.shell> load             # load the project specification
ai.shell> project-status   # analyze the current structure

# 2. AI-driven development
ai.shell> suggest-next     # suggest the next task
ai.shell> continuous       # start continuous development mode

# 3. Context-aware development
ai.shell> analyze <file>   # analyze a file with project context
ai.shell> generate <desc>  # context-aware code generation
ai.shell> <concrete development question>  # best suggestions from memory + context

# 4. Continuous improvement
# The AI understands the whole project and assists consistently,
# remembering earlier discussions and decisions.
```

### 💡 **Differences from the previous ai.shell**

| Feature | Before | New implementation |
|------|------|--------|
| Project understanding | one-shot | structure analysis + persistent context |
| Code generation | generic | project-context aware |
| Development suggestions | none | AI-driven next-step suggestions |
| File analysis | standalone | consistency checks + improvement suggestions |
| Change tracking | none | automatic detection + impact analysis |

**True Claude Code parity achieved!** The memory system plus project-context awareness enables consistent, long-term development support.

## 🛠️ Continuous Development with ai.shell - Practical Examples

### 🚀 **Project development workflow examples**

#### 📝 **Example 1: RESTful API development**
```bash
# 1. Start the project in ai.shell (using qwen2.5-coder)
aigpt shell --model qwen2.5-coder:latest --provider ollama

# 2. Load the project specification so the AI understands it
ai.shell> load
# → finds and loads aishell.md; the AI memorizes the project goals

# 3. Check the project structure
ai.shell> !ls -la
ai.shell> !git status

# 4. Discuss the design of a user-management API
ai.shell> I want to build user management as a RESTful API. Can we discuss the design?

# 5. Generate code based on the AI's suggestions
ai.shell> generate Python FastAPI user management with CRUD operations

# 6. Save the generated code to files
ai.shell> !mkdir -p src/api
ai.shell> !touch src/api/users.py

# 7. Analyze and improve the implementation
ai.shell> analyze src/api/users.py
ai.shell> What security improvements would you suggest?

# 8. Generate tests
ai.shell> generate pytest test cases for the user management API

# 9. Run tests in the isolated environment
ai.shell> remote python -m pytest tests/ -v
ai.shell> isolated import requests; print(requests.get("http://localhost:8000/health").status_code)

# 10. Commit incrementally
ai.shell> !git add .
ai.shell> !git commit -m "Add user management API with security improvements"

# 11. Keep the conversation going
ai.shell> Next I'd like to discuss the database design
```

#### 🔄 **Example 2: Feature extension and refactoring**
```bash
# Continued ai.shell session (the memory system remembers the last discussion)
aigpt shell --model qwen2.5-coder:latest --provider ollama

# The AI remembers the earlier API work and picks up from there
ai.shell> status
# Relationship Status: acquaintance (the relationship has progressed)
# Score: 25.00 / 100.0

# Continue naturally from last time
ai.shell> I want to add authentication to the user management API we built

# Suggestions that account for the earlier code
ai.shell> generate JWT authentication middleware for our FastAPI

# Consistency check against the existing code
ai.shell> analyze src/api/users.py
ai.shell> How do we integrate this auth system with the existing API?

# Staged implementation
ai.shell> explain JWT token flow in our architecture
ai.shell> generate authentication decorator for protected endpoints

# Refactoring suggestions
ai.shell> What could be improved in the current code structure?
ai.shell> generate improved project structure for scalability

# Database design discussion
ai.shell> explain SQLAlchemy models for user authentication
ai.shell> generate database migration scripts

# Safe testing in the isolated environment
ai.shell> remote alembic upgrade head
ai.shell> isolated import sqlalchemy; print("DB connection test")
```

#### 🎯 **Example 3: Bug fixing and optimization**
```bash
# Continued development (the AI retains the full development history)
aigpt shell --model qwen2.5-coder:latest --provider ollama

# The relationship has progressed further (close_friend level)
ai.shell> status
# Relationship Status: close_friend
# Score: 45.00 / 100.0

# Bug report and analysis
ai.shell> The API response times are slow. Can you analyze the performance?
ai.shell> analyze src/api/users.py

# AI optimization suggestions
ai.shell> generate database query optimization for user lookup
ai.shell> explain async/await patterns for better performance

# Test-driven improvement
ai.shell> generate performance test cases
ai.shell> !pytest tests/ -v --benchmark

# Caching strategy discussion
ai.shell> Redis caching strategy for our user API?
ai.shell> generate caching layer implementation

# Production deployment preparation
ai.shell> explain Docker containerization for our API
ai.shell> generate Dockerfile and docker-compose.yml
ai.shell> generate production environment configurations

# Deployment test in the isolated environment
ai.shell> remote docker build -t myapi .
ai.shell> isolated os.system("docker run --rm myapi python -c 'print(\"Container works!\")'")
ai.shell> aibot-status  # check the deployment environment
```

### 🧠 **Benefits of the memory system**

#### 💡 **A continuous development experience**
- **Context retention**: remembers earlier discussions and code for consistent suggestions
- **Relationship progression**: trust built through collaboration enables deeper suggestions
- **Staged growth**: support at the right level as the project evolves

#### 🔧 **Practical usage**
```bash
# Daily development routine
aigpt shell --model qwen2.5-coder:latest --provider ollama
ai.shell> load                   # re-brief the AI on the project
ai.shell> !git log --oneline -5  # check recent changes
ai.shell> What should we start with today?  # context-aware suggestions

# Use on long-running projects
ai.shell> Do you remember the architecture discussion from last week?
ai.shell> Were the concerns from back then resolved?
ai.shell> What do we need for the next milestone?

# Knowledge sharing in team development
ai.shell> Generate a design document to onboard a new member
ai.shell> Analyze the technical debt in this project
```

### 🚧 Next Steps
- **Autonomous transmission**: atproto implementation (memory-based decisions)
- **Memory visualization**: web dashboard (relationship graphs)
- **Distributed memory**: user data sovereignty on atproto
- **AI collaboration**: a memory-sharing protocol between multiple AIs

## Troubleshooting

### Environment Setup
```bash
# Check the virtual environment
source ~/.config/syui/ai/gpt/venv/bin/activate
aigpt --help

# Check the configuration
aigpt config list

# Check the data
ls ~/.config/syui/ai/gpt/data/
```

### MCP Server Check
```bash
# ai.gpt unified server (14 tools)
# Start the MCP server
aigpt server --port 8001
curl http://localhost:8001/docs

# ai.card standalone server (9 tools)
cd card/api && uvicorn app.main:app --port 8000
curl http://localhost:8000/health

# Try the memory system
aigpt chat syui "your question" --provider ollama --model qwen3:latest
```

## Technical Architecture

### Integrated Structure
```
ai.gpt (unified MCP server :8001)
├── 🧠 ai.gpt core (memory, relationships, personality)
├── 💻 ai.shell (Claude Code-style development environment)
├── 🎴 ai.card (standalone MCP server :8000)
└── 📝 ai.log (Rust blog system :8002)
```

### Future Directions
- **Autonomous transmission**: truly spontaneous communication via an atproto implementation
- **ai.ai integration**: integration with the psychological-analysis AI
- **ai.verse integration**: connection with the UE metaverse
- **Distributed SNS integration**: full atproto support

## Innovative Characteristics

### AI-Driven Memory System
- Effective memory construction learned from 4,000 ChatGPT conversation logs
- Human-like forgetting and importance scoring

### Irreversible Relationships
- AI relationships that carry the same weight as real human relationships
- Broken relationships cannot be repaired

### Integrated Architecture
- Multiple AI systems unified on a fastapi_mcp foundation
- OpenAI Function Calling + MCP integration, verified end to end
aishell.md (deleted, 63 lines)
@@ -1,63 +0,0 @@

# ai.shell Project Specification

## Overview
ai.shell is an interactive, AI-powered shell environment. It offers a Claude Code-like experience in which the AI understands the project's goals and specification and supports development.

## Main Features

### 1. Interactive Shell
- Conversational interface with the AI
- Shell command execution (!command form)
- Advanced completion
- Command history

### 2. AI Assistance
- **analyze <file>**: analyze a file
- **generate <description>**: generate code
- **explain <topic>**: explain a concept
- **load**: load the project specification (this file)

### 3. ai.gpt Integration
- Relationship-based AI personality
- Memory system
- Responses that vary with the fortune system

## Usage

```bash
# Start ai.shell
aigpt shell

# Load the project specification
ai.shell> load

# Analyze a file
ai.shell> analyze src/main.py

# Generate code
ai.shell> generate Python function to calculate fibonacci

# Run a shell command
ai.shell> !ls -la

# Talk to the AI
ai.shell> How can I improve this code?
```

## Tech Stack
- Python 3.10+
- prompt-toolkit (completion)
- fastapi-mcp (MCP integration)
- ai.gpt (personality & memory system)

## Development Goals
1. A natural development experience like Claude Code
2. An AI that understands the project context
3. Seamless integration of shell commands and AI
4. Improved developer productivity

## Roadmap
- Integration with ai.card (card-game MCP server)
- More advanced project understanding
- Automatic code fixes and refactoring
- Test generation and execution
card (submodule deleted from 6cd8014f80)
claude.md (429 lines changed)
@@ -1,346 +1,115 @@

# Ecosystem Integration Design Document
# ai.gpt Project-Specific Information

## Core Philosophy
- **Existent theory**: the pursuit of the smallest thing in this world (the existent / ai)
- **Uniqueness principle**: every system guarantees the uniqueness of the real individual
- **Reflection of reality**: a cyclic influence of reality → game → reality

## Project Overview
- **Name**: ai.gpt
- **Package**: aigpt
- **Type**: autonomous transmission AI + unified MCP platform
- **Role**: integrated AI system for memory, relationships, and development support

## System Diagram
## Implementation Status

```
existent (ai) - the smallest unit of consciousness
    ↓
[ai.moji] character system
    ↓
[ai.os] + [ai.game device] ← integrated hardware
    ├── ai.shell (Claude Code-like features)
    ├── ai.gpt (autonomous personality & memory system)
    ├── ai.ai (personalized, mind-reading AI)
    ├── ai.card (card game / iOS/Web/API)
    └── ai.bot (distributed SNS integration / card distribution)
    ↓
[ai.verse] metaverse
    ├── world system (planet-type 3D world)
    ├── at system (atproto / distributed SNS)
    ├── yui system (uniqueness guarantee)
    └── ai system (existence attributes)
```

### 🧠 Memory System (MemoryManager)
- **Hierarchical memory**: full log → AI summary → core memory → selective forgetting
- **Contextual search**: keyword and semantic search
- **Memory summarization**: AI-driven automatic summaries

### 🤝 Relationship System (RelationshipTracker)
- **Irreversibility**: carries the same weight as real human relationships
- **Time decay**: relationships change naturally over time
- **Transmission decision**: spontaneous communication gated by a relationship threshold

### 🎭 Personality System (Persona)
- **AI fortune**: daily personality variation driven by a random 1-10 value
- **Unified management**: combined judgment over memory, relationships, and fortune
- **Continuity**: personality carried forward through long-term memory

### 💻 ai.shell Integration (Claude Code features)
- **Interactive environment**: `aigpt shell`
- **Development support**: file analysis, code generation, project management
- **Continuous development**: retains project context

## MCP Server Integration (23 tools)

### 🧠 Memory System (5 tools)
- get_memories, get_contextual_memories, search_memories
- create_summary, create_core_memory

### 🤝 Relationships (4 tools)
- get_relationship, get_all_relationships
- process_interaction, check_transmission_eligibility

### 💻 Shell Integration (5 tools)
- execute_command, analyze_file, write_file
- read_project_file, list_files

### 🔒 Remote Execution (4 tools)
- remote_shell, ai_bot_status
- isolated_python, isolated_analysis

### ⚙️ System State (3 tools)
- get_persona_state, get_fortune, run_maintenance

### 🎴 ai.card Integration (6 tools + standalone MCP server)
- card_draw_card, card_get_user_cards, card_analyze_collection
- **Standalone server**: FastAPI + MCP (port 8000)

### 📝 ai.log Integration (8 tools + Rust server)
- log_create_post, log_ai_content, log_translate_document
- **Standalone server**: written in Rust (port 8002)

## Development Environment & Configuration

### Environment Setup
```bash
cd /Users/syui/ai/gpt
./setup_venv.sh
source ~/.config/syui/ai/gpt/venv/bin/activate
```

## Naming Conventions
### Configuration Management
- **Main config**: `/Users/syui/ai/gpt/config.json`
- **Data directory**: `~/.config/syui/ai/gpt/`
- **Virtual environment**: `~/.config/syui/ai/gpt/venv/`

The naming conventions are shared across all projects. An example follows; please stick to these rules.

### Usage
```bash
# Start ai.shell
aigpt shell --model qwen2.5-coder:latest --provider ollama

# Start the MCP server
aigpt server --port 8001
```

Here is the naming-convention example for `ai.os`:

name: ai.os

**[ "package", "code", "command" ]**: aios
**[ "dir", "url" ]**: ai/os
**[ "domain", "json" ]**: ai.os

```sh
$ curl -sL https://git.syui.ai/ai/ai/raw/branch/main/ai.json|jq .ai.os
{ "type": "os" }
```

```bash
# Try the memory system
aigpt chat syui "your question" --provider ollama --model qwen3:latest
```

```json
{
  "ai": {
    "os": {}
  }
}
```
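Since the mapping is mechanical, a small helper can derive every form from the project name. A sketch for the ai.os example above; the helper itself is hypothetical:

```python
def name_forms(domain: str) -> dict[str, str]:
    """Derive the package/dir/domain forms from a project name like 'ai.os'."""
    left, right = domain.split(".")
    return {
        "package": left + right,   # package / code / command -> aios
        "dir": f"{left}/{right}",  # dir / url                -> ai/os
        "domain": domain,          # domain / json             -> ai.os
    }

assert name_forms("ai.os") == {"package": "aios", "dir": "ai/os", "domain": "ai.os"}
assert name_forms("ai.gpt")["package"] == "aigpt"
```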
## Technical Architecture

### Integrated Structure
```
ai.gpt (unified MCP server :8001)
├── 🧠 ai.gpt core (memory, relationships, personality)
├── 💻 ai.shell (Claude Code-style development environment)
├── 🎴 ai.card (standalone MCP server :8000)
└── 📝 ai.log (Rust blog system :8002)
```

Other projects adopt the same naming convention; for `ai.gpt` the package is `aigpt`.

### Future Directions
- **Autonomous transmission**: truly spontaneous communication via an atproto implementation
- **ai.ai integration**: integration with the psychological-analysis AI
- **ai.verse integration**: connection with the UE metaverse
- **Distributed SNS integration**: full atproto support

## config (configuration files, env, environment-dependent)

## Innovative Characteristics

The location of `config` is standardized, using the `dir` form of each project's naming convention. For aios, for example, it lives under `~/.config/syui/ai/os/`. When using Python, build the environment into this package config dir (e.g. with `python -m venv`) and run from there.

### AI-Driven Memory System
- Effective memory construction learned from 4,000 ChatGPT conversation logs
- Human-like forgetting and importance scoring

Since the domain form is adopted and I host each project at `git.syui.ai/ai`, the base directory is `~/.config/syui/ai`.

### Irreversible Relationships
- AI relationships that carry the same weight as real human relationships
- Broken relationships cannot be repaired

```sh
[syui.ai]
syui/ai
```

```sh
# example
~/.config/syui/ai
├── card
├── gpt
├── os
└── shell
```

## System Details

### ai.gpt - Autonomous Transmission AI
**Purpose**: spontaneous communication based on relationships

**Core concepts**:
- **Personality**: composed of memory (past utterances) and relationship parameters
- **Uniqueness**: 1:1 binding to an atproto account, unalterable
- **Autonomous transmission**: transmission unlocks once the relationship crosses a threshold

**Technical composition**:
- `MemoryManager`: full log → AI summary → core selection → selective forgetting
- `RelationshipTracker`: relationship scores with time decay and daily limits
- `TransmissionController`: threshold checks and transmission triggers
- `Persona`: personality variation via AI fortune (random 1-10)

**Implementation notes**:
```
- Language: Python (fastapi_mcp)
- Storage: JSON or SQLite, selectable
- Interface: Python CLI (click/typer)
- Scheduling: cron-like autonomous processing
```

### ai.card - Card Game System
**Purpose**: a card game with user data sovereignty on the atproto foundation

**Current status**:
- Implemented as an ai.bot feature
- Mentioning from an atproto account yields one card per day
- User management via ai.api (planned MCP server)

**Migration plan**:
- **iOS port**: Claude is slated to handle it
- **Data storage**: saved in atproto collection records (users own their data)
- **Cheat prevention**: OAuth 2.1 scope (implementation pending) + MCP server
- **Image files**: Cloudflare Pages is the best fit

**yui system application**:
- Card effects are account-specific
- Tamper prevention preserves game balance
- Future ai.verse integration ties cards to unique skills

### ai.ai - Mind-Reading AI
**Purpose**: a personalized AI for deep understanding

**Relation to ai.gpt**:
- ai.gpt → ai.ai: hand-off from the autonomous transmission AI to the psychological-analysis AI
- Deep analysis of relationship parameters
- Helps identify the core of the user's thinking

### ai.verse - UE Metaverse
**Purpose**: a 3D world that reflects reality

**yui system implementation**:
- Character ↔ player bound 1:1
- unique skill: usable only by that player
- Other players cannot use the same skill even with the same character

**Integrated elements**:
- ai.card: cards as in-game items
- ai.gpt: autonomous AI personalities as NPCs
- atproto: in-game profile integration

## Data Flow Design

### Implementing the uniqueness guarantee
```
real individual → atproto account (DID) → in-game avatar → unique skill
       ↑_______________________________| (reflection of reality)
```

### AI-driven conversion system
```
play & creative activity → ai.gpt analysis → work-output conversion → corporate value
       ↑________________________| (Play-to-Work)
```

### Card game / data sovereignty flow
```
user → ai.bot mention → card generation → atproto collection → user-owned
  ↑                                                       ↓
  ←――――――― iOS app display ← ai.card API ←―――――――――――――――
```

## Tech Stack Integration

### Core Infrastructure
- **OS**: Rust-based ai.os (Arch Linux base)
- **Container**: Docker image distribution
- **Identity**: self-hosted atproto server + DID management
- **AI**: fastapi_mcp server architecture
- **CLI**: unified Python (click/typer) - migrated from Rust

### Game Engine Integration
- **Engine**: Unreal Engine (Blueprint)
- **Data**: atproto → UE → atproto sync
- **Avatar**: distributed SNS profile → 3D character
- **Streaming**: game screen = broadcast screen

### Mobile/Device
- **iOS**: ai.card port (Claude's task)
- **Hardware**: ai.game device (future)
- **Interface**: controller-first design

## Implementation Priorities

### Phase 1: strengthen the AI foundation (in progress)
- [ ] Complete the ai.gpt memory system
  - Memory hierarchy (full log → summary → core → forgetting)
  - Time-decay system for relationship parameters
  - Personality variation via AI fortune
- [ ] ai.card iOS port
  - atproto collection record integration
  - Conversion to an MCP server (ai.api overhaul)
- [ ] Build the unified fastapi_mcp foundation

### Phase 2: game integration
- [ ] Implement the ai.verse yui system
  - unique skill feature
  - Stronger atproto integration
- [ ] ai.gpt ↔ ai.ai integration
- [ ] Distributed SNS ↔ game sync

### Phase 3: metaverse reach
- [ ] Integrated VTuber streaming
- [ ] Play-to-Work conversion system
- [ ] ai.game device prototype

## Future Integration Ideas

### Cross-system integration (currently independent)
```
ai.gpt (autonomous transmission) ←→ ai.ai (psychological analysis)
ai.card (iOS, Web, API) ←→ ai.verse (UE game world)
```

**Shared foundation**: fastapi_mcp
**Shared philosophy**: yui system (reflection of reality, uniqueness guarantee)

### Anti-tampering strategy
- **Short term**: verification via the MCP server
- **Mid term**: waiting on the OAuth 2.1 scope implementation
- **Long term**: blockchain-style integrity checks

## Optimizing AI Communication

### Project requirements template
```markdown
# [project name] Requirements

## Philosophical background
- Relation to existent theory:
- Scope of the yui system:
- Mechanism for reflecting reality:

## Technical requirements
- Technology (unified on fastapi_mcp):
- atproto integration method:
- Data persistence method:

## User stories
1. When the user ...
2. the system executes ...
3. and as a result ... is realized

## Success criteria
- Technical:
- Philosophical (uniqueness guarantee):
```

### Claude Code strategy
1. **Start small**: begin with extending ai.gpt's MCP features
2. **Staged integration**: finish each system individually before integrating
3. **Philosophical consistency**: check each implementation against the yui system
4. **Reflecting reality**: always state how an implementation links reality and the game

## Development Notes

### MCP server design guidelines
- Each AI (gpt, card, ai, bot) is an independent MCP server
- Unified on the fastapi_mcp foundation
- Authentication and authorization via atproto DID

### Memory and data management
- **ai.gpt**: prioritizes the irreversibility of relationships
- **ai.card**: prioritizes user data sovereignty
- **ai.verse**: prioritizes the consistency of the game world

### Implementing the uniqueness guarantee
- 1:1 binding to an atproto account is mandatory
- Unalterability is guaranteed with hashes and signatures
- Reproduction in other systems is made technically impossible
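One way to realize a tamper-evident 1:1 binding is a server-side signature over the DID-to-entity pairing. A minimal HMAC-based sketch; the signing scheme is an assumption for illustration, not the project's actual design:

```python
import hashlib
import hmac

SERVER_SECRET = b"server-side secret key"  # hypothetical; held only by the MCP server

def bind(did: str, entity_id: str) -> str:
    """Issue a signature binding an atproto DID to a persona/character ID."""
    message = f"{did}:{entity_id}".encode()
    return hmac.new(SERVER_SECRET, message, hashlib.sha256).hexdigest()

def verify(did: str, entity_id: str, signature: str) -> bool:
    """Reject any pairing whose signature does not match."""
    return hmac.compare_digest(bind(did, entity_id), signature)

sig = bind("did:plc:xxxxx", "persona-001")
assert verify("did:plc:xxxxx", "persona-001", sig)
assert not verify("did:plc:yyyyy", "persona-001", sig)  # another DID cannot reuse it
```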
## Continuous Improvement
- Each project refers back to this design document
- Check new features against the yui system for consistency
- Assess the impact on other systems in advance
- Staged migration plan for introducing Claude Code

## ai.gpt Deep Design Philosophy

### Irreversibility of personality
- **A destroyed relationship cannot be repaired**: the same weight as real human relationships
- **Selective forgetting**: unimportant information is forgotten, but core memories persist
- **Time decay**: every parameter decays naturally over time

### AI fortune system
- A random 1-10 value varies the personality daily
- Breakthrough conditions for streaks of luck or misfortune
- Personality formation as an environmental factor
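A sketch of how a daily 1-10 fortune with streak detection might work; seeding by date keeps the value stable within a day, and the streak thresholds are assumptions:

```python
import datetime
import random

def daily_fortune(user_id: str, day: datetime.date | None = None) -> int:
    """Deterministic 1-10 fortune for a given user and day."""
    day = day or datetime.date.today()
    rng = random.Random(f"{user_id}:{day.isoformat()}")
    return rng.randint(1, 10)

def streak(user_id: str, days: int = 3, lucky: bool = True) -> bool:
    """True if the last `days` fortunes were all lucky (>=8) or all unlucky (<=3)."""
    today = datetime.date.today()
    values = [daily_fortune(user_id, today - datetime.timedelta(d)) for d in range(days)]
    return all(v >= 8 for v in values) if lucky else all(v <= 3 for v in values)
```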
### Memory hierarchy
1. **Full log**: every conversation is recorded
2. **AI summary**: the important parts are extracted and compressed
3. **Core-of-thought selection**: identifies the user's essential traits
4. **Selective forgetting**: low-importance information is removed in stages
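The four stages could be modeled as levels with promotion and forgetting passes. A minimal sketch with assumed importance thresholds:

```python
from dataclasses import dataclass
from enum import Enum

class Level(Enum):
    FULL_LOG = 0
    SUMMARY = 1
    CORE = 2

@dataclass
class MemoryEntry:
    content: str
    importance: float  # assumed range 0.0 - 1.0
    level: Level = Level.FULL_LOG

def maintenance(memories: list[MemoryEntry]) -> list[MemoryEntry]:
    """One maintenance pass: promote important memories, forget trivial ones."""
    kept = []
    for m in memories:
        if m.importance >= 0.9:
            m.level = Level.CORE          # core memories persist permanently
        elif m.importance >= 0.5 and m.level is Level.FULL_LOG:
            m.level = Level.SUMMARY       # worth compressing into a summary
        elif m.importance < 0.1 and m.level is Level.FULL_LOG:
            continue                      # selective forgetting: drop it
        kept.append(m)
    return kept
```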
### Key implementation decisions
- **Language unification**: Python (fastapi_mcp) everywhere; CLI via click/typer
- **Data format**: JSON or SQLite, selectable
- **Auth**: uniqueness guaranteed via atproto DID
- **Staged implementation**: conversation → memory → relationships → transmission, in that order

### Staged rollout of the transmission feature
- **Phase 1**: CLI print output (current)
- **Phase 2**: direct atproto posting
- **Phase 3**: integration with ai.bot (Rust/seahorse)
- **Future**: multi-channel support (SNS, webhooks, etc.)

## ai.gpt Implementation Status (2025/01/06)

### Completed features
- Hierarchical memory system (MemoryManager)
- Irreversible relationship system (RelationshipTracker)
- AI fortune system (FortuneSystem)
- Integrated personality system (Persona)
- Scheduler (5 task types)
- MCP server (9 tools)
- Configuration management (~/.config/syui/ai/gpt/)
- All CLI commands implemented

### Next development points
- See `ai_gpt/DEVELOPMENT_STATUS.md`
- Autonomous transmission: implement atproto in transmission.py
- ai.bot integration: create a new bot_connector.py
- Tests: add a tests/ directory

## ai.card Implementation Status (2025/01/06)

### Completed features
- Standalone MCP server implementation (FastAPI + fastapi-mcp)
- SQLite database integration
- Gacha system and card management
- 9 MCP tools exposed
- Virtual environment and startup scripts in place

### Current issues
- Adapting to the atproto SessionString API change
- PostgreSQL dependency (to be resolved with Docker)
- supabase httpx version conflict

### Division of work during development
- **Work from ai.gpt**: MCP/backend work (API, database)
- **Work from ai.card**: iOS/Web work (UI implementation, frontend)

See `./card/claude.md` for details.

# footer

© syui

### Integrated Architecture
- Multiple AI systems unified on a fastapi_mcp foundation
- OpenAI Function Calling + MCP integration, verified end to end
config.json.example (new file, 60 lines)
@@ -0,0 +1,60 @@
{
  "providers": {
    "openai": {
      "api_key": "",
      "default_model": "gpt-4o-mini",
      "system_prompt": "あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。\n\n重要:カード、コレクション、ガチャなどカード関連の質問を受けたら、必ずcard_get_user_cards、card_analyze_collection、card_draw_cardなどの適切なツールを使用してください。didパラメータには会話相手のユーザーID(例:'syui')を使用してください。\n\nブログ、記事、日記、思考などの話題が出たら、log_create_post、log_list_posts、log_build_blog、log_ai_contentなどのai.logツールを使用してください。AI記憶システムと連携して、思い出や学習内容をブログ記事として自動生成できます。\n\n翻訳や多言語対応について聞かれたら、log_translate_documentツールを使用してOllama AIで翻訳ができることを教えてください。日本語から英語、英語から日本語などの翻訳が可能で、マークダウン構造も保持します。ドキュメント生成についてはlog_generate_docsツールでREADME、API、構造、変更履歴の自動生成ができます。"
    },
    "ollama": {
      "host": "http://127.0.0.1:11434",
      "default_model": "qwen3",
      "system_prompt": null
    }
  },
  "atproto": {
    "handle": null,
    "password": null,
    "host": "https://bsky.social"
  },
  "default_provider": "openai",
  "mcp": {
    "servers": {
      "ai_gpt": {
        "base_url": "http://localhost:8001",
        "name": "ai.gpt MCP Server",
        "timeout": "10.0",
        "endpoints": {
          "get_memories": "/get_memories",
          "search_memories": "/search_memories",
          "get_contextual_memories": "/get_contextual_memories",
          "get_relationship": "/get_relationship",
          "process_interaction": "/process_interaction",
          "get_all_relationships": "/get_all_relationships",
          "get_persona_state": "/get_persona_state",
          "get_fortune": "/get_fortune",
          "run_maintenance": "/run_maintenance",
          "execute_command": "/execute_command",
          "analyze_file": "/analyze_file",
          "remote_shell": "/remote_shell",
          "ai_bot_status": "/ai_bot_status",
          "card_get_user_cards": "/card_get_user_cards",
          "card_draw_card": "/card_draw_card",
          "card_get_card_details": "/card_get_card_details",
          "card_analyze_collection": "/card_analyze_collection",
          "card_get_gacha_stats": "/card_get_gacha_stats",
          "card_system_status": "/card_system_status",
          "log_create_post": "/log_create_post",
          "log_list_posts": "/log_list_posts",
          "log_build_blog": "/log_build_blog",
          "log_get_post": "/log_get_post",
          "log_system_status": "/log_system_status",
          "log_ai_content": "/log_ai_content",
          "log_translate_document": "/log_translate_document",
          "log_generate_docs": "/log_generate_docs"
        }
      }
    },
    "enabled": "true",
    "auto_detect": "true"
  }
}
config.toml.example (new file, 64 lines)
@@ -0,0 +1,64 @@
# AI.GPT Configuration File
# This is an example configuration file showing all available options

default_provider = "openai"

[providers.openai]
api_key = ""
default_model = "gpt-4o-mini"
system_prompt = """
あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。

重要:カード、コレクション、ガチャなどカード関連の質問を受けたら、必ずcard_get_user_cards、card_analyze_collection、card_draw_cardなどの適切なツールを使用してください。didパラメータには会話相手のユーザーID(例:'syui')を使用してください。

ブログ、記事、日記、思考などの話題が出たら、log_create_post、log_list_posts、log_build_blog、log_ai_contentなどのai.logツールを使用してください。AI記憶システムと連携して、思い出や学習内容をブログ記事として自動生成できます。

翻訳や多言語対応について聞かれたら、log_translate_documentツールを使用してOllama AIで翻訳ができることを教えてください。日本語から英語、英語から日本語などの翻訳が可能で、マークダウン構造も保持します。ドキュメント生成についてはlog_generate_docsツールでREADME、API、構造、変更履歴の自動生成ができます。
"""

[providers.ollama]
host = "http://127.0.0.1:11434"
default_model = "qwen3"

[atproto]
host = "https://bsky.social"
# handle = "your-handle.bsky.social"
# password = "your-app-password"

[mcp]
enabled = true
auto_detect = true

[mcp.servers.ai_gpt]
base_url = "http://localhost:8001"
name = "ai.gpt MCP Server"
timeout = 10.0

[mcp.servers.ai_gpt.endpoints]
get_memories = "/get_memories"
search_memories = "/search_memories"
get_contextual_memories = "/get_contextual_memories"
get_relationship = "/get_relationship"
process_interaction = "/process_interaction"
get_all_relationships = "/get_all_relationships"
get_persona_state = "/get_persona_state"
get_fortune = "/get_fortune"
run_maintenance = "/run_maintenance"
execute_command = "/execute_command"
analyze_file = "/analyze_file"
remote_shell = "/remote_shell"
ai_bot_status = "/ai_bot_status"
card_get_user_cards = "/card_get_user_cards"
card_draw_card = "/card_draw_card"
card_get_card_details = "/card_get_card_details"
card_analyze_collection = "/card_analyze_collection"
card_get_gacha_stats = "/card_get_gacha_stats"
card_system_status = "/card_system_status"
log_create_post = "/log_create_post"
log_list_posts = "/log_list_posts"
log_build_blog = "/log_build_blog"
log_get_post = "/log_get_post"
log_system_status = "/log_system_status"
log_ai_content = "/log_ai_content"
log_translate_document = "/log_translate_document"
log_generate_docs = "/log_generate_docs"
@@ -1,30 +0,0 @@
# ai.gpt Documentation

ai.gpt is an AI system that acts autonomously based on memory and relationships.

## Table of Contents

- [Quick Start](quickstart.md)
- [Core Concepts](concepts.md)
- [Command Reference](commands.md)
- [Configuration Guide](configuration.md)
- [Scheduler](scheduler.md)
- [MCP Server](mcp-server.md)
- [Developer Guide](development.md)

## Features

- 🧠 **Hierarchical memory system**: full log → summary → core memory → forgetting
- 💔 **Irreversible relationships**: like real human relationships, they cannot be repaired
- 🎲 **AI fortune system**: a personality that changes day by day
- 🤖 **Autonomous transmission**: the AI messages you on its own once the relationship deepens
- 🔗 **MCP support**: exposes its memory as AI tools

## System Requirements

- Python 3.10 or later
- Optional: Ollama or the OpenAI API

## License

MIT License
@@ -1,244 +0,0 @@
# ai.card MCP Integration Work Report (2025/01/06)

## Overview
Added a standalone MCP server implementation to the ai.card project, exposing the card game features as MCP tools on top of fastapi_mcp.

## Completed Features

### 1. MCP dependency added
**Location**: `card/api/requirements.txt`

**Addition**:
```txt
fastapi-mcp==0.1.0
```

### 2. ai.card MCP server implementation
**Location**: `card/api/app/mcp_server.py`

**Features**:
- FastAPI + fastapi_mcp integration
- Standalone MCP server class `AICardMcpServer`
- Enable/disable switch via environment variable

**Published MCP tools (9)**:

**Card management (5)**:
- `get_user_cards` - fetch a user's card list
- `draw_card` - obtain a card via gacha
- `get_card_details` - fetch detailed card info
- `analyze_card_collection` - collection analysis
- `get_unique_registry` - unique card registration status

**System (3)**:
- `sync_cards_atproto` - atproto sync
- `get_gacha_stats` - gacha system statistics
- the existing FastAPI REST API (/api/v1/*)

**atproto integration (1)**:
- `sync_cards_atproto` - sync card data to the atproto PDS

### 3. Main app integration
**Location**: `card/api/app/main.py`

**Changes**:
```python
# MCP integration
from app.mcp_server import AICardMcpServer

enable_mcp = os.getenv("ENABLE_MCP", "true").lower() == "true"
mcp_server = AICardMcpServer(enable_mcp=enable_mcp)
app = mcp_server.get_app()
```

**Verified behavior**:
- `ENABLE_MCP=true` (default): MCP server enabled
- `ENABLE_MCP=false`: plain FastAPI only

## Technical Details

### Architecture
```
ai.card/
├── api/app/main.py          # FastAPI app + MCP integration
├── api/app/mcp_server.py    # standalone MCP server
├── api/app/routes/          # REST API (existing)
├── api/app/services/        # business logic (existing)
├── api/app/repositories/    # data access (existing)
└── api/requirements.txt     # fastapi-mcp added
```

### MCP tool implementation pattern
```python
@self.app.get("/tool_name", operation_id="tool_name")
async def tool_name(
    param: str,
    session: AsyncSession = Depends(get_session)
) -> Dict[str, Any]:
    """Tool description"""
    try:
        # run the business logic
        result = await service.method(param)
        return {"success": True, "data": result}
    except Exception as e:
        logger.error(f"Error: {e}")
        return {"error": str(e)}
```
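Because each tool is also a plain GET route, it can be exercised directly over HTTP. A minimal smoke-test sketch, assuming the server from this report is running on localhost:8000; the tool and parameter names follow the lists above:

```python
import asyncio
import httpx

async def call_tool() -> dict:
    # Each MCP tool is also exposed as a regular FastAPI GET route,
    # so a plain HTTP client works for smoke testing.
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.get("/get_user_cards", params={"did": "did:plc:example"})
        resp.raise_for_status()
        return resp.json()

print(asyncio.run(call_tool()))
```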

### Integration with the existing system
- **REST API**: the existing `/api/v1/*` endpoints are preserved
- **Data access**: reuses the existing Repository/Service pattern
- **Auth**: uses the existing DID authentication
- **Database**: existing PostgreSQL + SQLAlchemy

## How to Start

### 1. Environment setup
```bash
cd /Users/syui/ai/gpt/card/api

# create a virtual environment (recommended)
python -m venv ~/.config/syui/ai/card/venv
source ~/.config/syui/ai/card/venv/bin/activate

# install dependencies
pip install -r requirements.txt
```

### 2. Start the server
```bash
# MCP enabled (default)
python -m app.main

# or
ENABLE_MCP=true uvicorn app.main:app --host 0.0.0.0 --port 8000

# MCP disabled
ENABLE_MCP=false uvicorn app.main:app --host 0.0.0.0 --port 8000
```

### 3. Verify
```bash
# health check
curl http://localhost:8000/health

# example response with MCP enabled
{
    "status": "healthy",
    "mcp_enabled": true,
    "mcp_endpoint": "/mcp"
}

# API spec
curl http://localhost:8000/docs
```

## MCP Client Integration

### Connecting from ai.gpt
```python
# used in ai.gpt's card_integration.py
api_base_url = "http://localhost:8000"

# access via MCP tools
response = await client.get(f"{api_base_url}/get_user_cards?did=did:plc:...")
```

### Use from Claude Desktop and similar clients
```json
{
  "mcpServers": {
    "aicard": {
      "command": "uvicorn",
      "args": ["app.main:app", "--host", "localhost", "--port", "8000"],
      "cwd": "/Users/syui/ai/gpt/card/api"
    }
  }
}
```

## Known Constraints and Notes

### 1. Dependencies
- **fastapi-mcp**: the current version is 0.1.0 (an early implementation)
- **Python environment**: the system environment raises an externally-managed error
- **Recommended**: run inside a virtual environment

### 2. Database requirements
- PostgreSQL must be running
- SQLite fallback is available (for development)
- atproto sync depends on an external API

### 3. Behavior with MCP disabled
- With `ENABLE_MCP=false`, it runs as plain FastAPI
- The existing REST API (`/api/v1/*`) is always available
- No impact on the iOS/Web apps

## Integration Strategy with ai.gpt

### Current state
- **ai.gpt**: unified MCP server (ai.gpt + ai.shell + ai.card proxy)
- **ai.card**: standalone MCP server (the card logic itself)

### Recommended integration pattern
```
Claude Desktop/Cursor
    ↓
ai.gpt MCP (port 8001)  ←-- ai.shell tools
    ↓ HTTP client
ai.card MCP (port 8000) ←-- card business logic
    ↓
PostgreSQL/atproto PDS
```

### Duplication to remove
The following can be deleted from the ai.gpt project:
- `src/aigpt/card_integration.py` (HTTP client)
- `./card/` (submodule)
- the MCP server's `--enable-card` option

## Recommended Steps for the Next Session

### 1. Check the environment
```bash
cd /Users/syui/ai/gpt/card/api
source ~/.config/syui/ai/card/venv/bin/activate
python -c "from app.mcp_server import AICardMcpServer; print('✓ Import OK')"
```

### 2. Server startup test
```bash
# start the server with MCP enabled
uvicorn app.main:app --host localhost --port 8000 --reload

# verify from another terminal
curl http://localhost:8000/health
curl "http://localhost:8000/get_gacha_stats"
```

### 3. Verify integration with ai.gpt
```bash
# start the ai.gpt server
cd /Users/syui/ai/gpt
aigpt server --port 8001

# start the ai.card server
cd /Users/syui/ai/gpt/card/api
uvicorn app.main:app --port 8000

# integration test (ai.gpt → ai.card)
curl "http://localhost:8001/get_user_cards?did=did:plc:example"
```

## Summary of Results

**Implemented**: standalone ai.card MCP server
**Technical outcome**: fastapi_mcp integration, 9 MCP tools published
**Architecture**: loosely coupled design, existing system preserved
**Extensibility**: MCP on/off switch via environment variable

**Integration effects**:
- ai.card runs as an independent MCP server
- duplicated MCP code shared with ai.gpt is eliminated
- separation of responsibility for card business logic is maintained
- ready for future microservice splitting
@@ -1,218 +0,0 @@
# ai.shell Integration Work Report (2025/01/06)

## Overview
Merged the Rust implementation of ai.shell into ai.gpt's Python implementation, providing a Claude Code-style interactive shell environment.

## Completed Features

### 1. The aigpt shell command
**Location**: `src/aigpt/cli.py` - the `shell()` function

**Usage**:
```bash
aigpt shell  # start the interactive shell
```

**In-shell commands**:
- `help` - list commands
- `!<command>` - run a shell command (e.g. `!ls`, `!pwd`)
- `analyze <file>` - analyze a file with the AI
- `generate <description>` - generate code
- `explain <topic>` - explain a concept
- `load` - load aishell.md
- `status`, `fortune`, `relationships` - check the AI's state
- `clear` - clear the screen
- `exit`/`quit` - quit
- any other message - talk to the AI directly

**Implementation notes**:
- uses prompt-toolkit (completion and history)
- has terminal-environment issues that still need fixing
- currently also works on plain `input()`

### 2. MCP server integration
**Location**: `src/aigpt/mcp_server.py`

**FastApiMCP implementation pattern**:
```python
# create the FastAPI app
self.app = FastAPI(title="AI.GPT Memory and Relationship System")

# create the FastApiMCP server
self.server = FastApiMCP(self.app)

# register endpoints
@self.app.get("/get_memories", operation_id="get_memories")
async def get_memories(limit: int = 10):
    # ...

# mount MCP
self.server.mount()
```

**Published tools (14)**:

**ai.gpt (9)**:
- `get_memories` - fetch active memories
- `get_relationship` - fetch the relationship with a given user
- `get_all_relationships` - fetch all relationships
- `get_persona_state` - fetch the persona state
- `process_interaction` - handle a user interaction
- `check_transmission_eligibility` - check whether transmission is allowed
- `get_fortune` - fetch the AI fortune
- `summarize_memories` - create a memory summary
- `run_maintenance` - run the daily maintenance

**ai.shell (5)**:
- `execute_command` - run a shell command
- `analyze_file` - AI file analysis
- `write_file` - write a file (with backup)
- `read_project_file` - read aishell.md and similar files
- `list_files` - list directory contents

### 3. ai.card integration support
**Location**: `src/aigpt/card_integration.py`

**Server startup option**:
```bash
aigpt server --enable-card  # enable ai.card features
```

**ai.card tools (5)**:
- `get_user_cards` - fetch a user's cards
- `draw_card` - obtain a card via gacha
- `get_card_details` - card details
- `sync_cards_atproto` - atproto sync
- `analyze_card_collection` - collection analysis

### 4. Project spec document
**Location**: `aishell.md`

Plays a Claude.md-like role, describing the project's goals and specs. The AI can read it via the `load` command.

## Technical Details

### Directory layout
```
src/aigpt/
├── cli.py              # shell function added
├── mcp_server.py       # FastApiMCP implementation
├── card_integration.py # ai.card integration
└── ...                 # existing files
```

### Dependency added
`pyproject.toml`:
```toml
dependencies = [
    # ... existing
    "prompt-toolkit>=3.0.0",  # added
]
```

### Naming unified
- MCP server name: `aigpt` (changed from ai-gpt)
- package name: `aigpt`
- command name: `aigpt shell`

## Verified Behavior

### CLI checks
```bash
# basics
aigpt shell
# inside the shell
ai.shell> help
ai.shell> !ls
ai.shell> analyze README.md  # requires an AI provider to be configured
ai.shell> load
ai.shell> exit

# MCP server
aigpt server --model qwen2.5-coder:7b --port 8001
# -> inspect the API at http://localhost:8001/docs
# -> connect over MCP at the /mcp endpoint
```

### Errors fixed
1. **Pydantic date type error**: added the `datetime.date` import in `models.py`
2. **FastApiMCP usage**: corrected the implementation pattern based on the sample code
3. **prompt name collision**: renamed `prompt_toolkit.prompt` to `ptk_prompt` (see the one-liner below)
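For reference, a minimal sketch of what that rename typically looks like; the surrounding read loop is an assumption, not shown in this report:

```python
# Alias the prompt-toolkit entry point so it no longer shadows
# other `prompt` names (e.g. typer/click prompts) in cli.py.
from prompt_toolkit import prompt as ptk_prompt

user_input = ptk_prompt("ai.shell> ")
```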

## Known Issues and Future Improvements

### 1. prompt-toolkit environment dependence
**Symptom**: errors when not running in a real terminal
**Fix**: detect the environment and fall back to `input()`
**Location**: `src/aigpt/cli.py` - the `shell()` function

### 2. AI provider setup
**Current**: requires Ollama's qwen2.5 model
**Fix**:
```bash
ollama pull qwen2.5
# or
aigpt shell --model qwen2.5-coder:7b
```

### 3. atproto implementation
**Current**: ai.card's atproto features are not implemented
**Next**: implement real atproto API integration

## Recommended Approach for the Next Session

### 1. Use this document
```bash
# read this file
cat docs/ai_shell_integration_summary.md
```

### 2. Environment setup
```bash
cd /Users/syui/ai/gpt
python -m venv venv
source venv/bin/activate
pip install -e .
```

### 3. Verify
```bash
# shell features
aigpt shell

# MCP server
aigpt server --model qwen2.5-coder:7b
```

### 4. Key files to check
- CLI implementation: `src/aigpt/cli.py`
- MCP implementation: `src/aigpt/mcp_server.py`
- dependencies: `pyproject.toml`
- project spec: `aishell.md`

## Architecture Design Philosophy

### The yui system applied
- **Uniqueness**: each relationship with a user is 1:1
- **Irreversibility**: a destroyed relationship cannot be repaired
- **Reality reflection**: a cyclical influence from game to reality

### A unified fastapi_mcp foundation
- each AI (gpt, shell, card) is published through the unified MCP server
- FastAPI endpoints are converted to MCP tools automatically
- usable from Claude Desktop, Cursor, and similar clients

### Staged implementation status
1. ✅ ai.shell basics → Python CLI
2. ✅ MCP integration → external AI connectivity
3. 🔧 prompt-toolkit tuning → environment handling
4. 🔧 atproto implementation → full SNS integration

## Summary of Results

**Implemented**: a Claude Code-style development environment
**Technical outcome**: Rust→Python migration, MCP integration, ai.card support
**Philosophical consistency**: stays aligned with the yui system
**Availability**: can be tried immediately with `aigpt shell`

With this integration, ai.gpt evolved from a plain conversational AI into a comprehensive AI environment that includes development support.
207
docs/commands.md
@@ -1,207 +0,0 @@
# Command Reference

## chat - Talk to the AI

Processes a conversation between the user and the AI and updates the relationship.

```bash
ai-gpt chat USER_ID MESSAGE [OPTIONS]
```

### Arguments
- `USER_ID`: user ID (atproto DID format)
- `MESSAGE`: the message to send

### Options
- `--provider`: AI provider (ollama/openai)
- `--model`, `-m`: model to use
- `--data-dir`, `-d`: data directory

### Examples
```bash
# basic conversation
ai-gpt chat "did:plc:user123" "こんにちは"

# using OpenAI
ai-gpt chat "did:plc:user123" "調子はどう?" --provider openai --model gpt-4o-mini

# custom model on Ollama
ai-gpt chat "did:plc:user123" "今日の天気は?" --provider ollama --model llama2
```

## status - Check state

Shows the AI's state or the relationship with a specific user.

```bash
ai-gpt status [USER_ID] [OPTIONS]
```

### Arguments
- `USER_ID`: (optional) check the relationship with a specific user

### Examples
```bash
# overall AI state
ai-gpt status

# relationship with a specific user
ai-gpt status "did:plc:user123"
```

## fortune - Today's fortune

Checks the AI's fortune for today.

```bash
ai-gpt fortune [OPTIONS]
```

### Output
- fortune value (1-10)
- streak of lucky/unlucky days
- breakthrough status

## relationships - List relationships

Lists the relationships with all users.

```bash
ai-gpt relationships [OPTIONS]
```

### Output
- user ID
- relationship status
- score
- whether transmission is allowed
- date of the last interaction

## transmit - Run transmissions

Reviews and executes messages to users who are eligible for transmission.

```bash
ai-gpt transmit [OPTIONS]
```

### Options
- `--dry-run/--execute`: dry run (default) or execute
- `--data-dir`, `-d`: data directory

### Examples
```bash
# review what would be sent (dry run)
ai-gpt transmit

# actually send
ai-gpt transmit --execute
```

## maintenance - Maintenance

Runs the daily maintenance tasks.

```bash
ai-gpt maintenance [OPTIONS]
```

### What it does
- time decay of relationships
- memory forgetting
- core memory detection
- memory summarization

## config - Manage configuration

Reads and changes settings.

```bash
ai-gpt config ACTION [KEY] [VALUE]
```

### Actions
- `get`: read a value
- `set`: change a value
- `delete`: delete a key
- `list`: list settings

### Examples
```bash
# set the API key
ai-gpt config set providers.openai.api_key sk-xxxxx

# check a value
ai-gpt config get providers.openai.api_key

# list all settings
ai-gpt config list

# show only provider settings
ai-gpt config list providers
```

## schedule - Manage schedules

Manages recurring tasks.

```bash
ai-gpt schedule ACTION [TASK_TYPE] [SCHEDULE] [OPTIONS]
```

### Actions
- `add`: add a task
- `list`: list tasks
- `enable`: enable a task
- `disable`: disable a task
- `remove`: remove a task
- `run`: start the scheduler

### Task types
- `transmission_check`: transmission check
- `maintenance`: daily maintenance
- `fortune_update`: fortune update
- `relationship_decay`: relationship decay
- `memory_summary`: memory summarization

### Schedule formats
- **Cron**: `"0 */6 * * *"` (every 6 hours)
- **Interval**: `"30m"`, `"2h"`, `"1d"`

### Examples
```bash
# check transmissions every 30 minutes
ai-gpt schedule add transmission_check "30m"

# maintenance every day at 3 AM
ai-gpt schedule add maintenance "0 3 * * *"

# list tasks
ai-gpt schedule list

# start the scheduler
ai-gpt schedule run
```

## server - MCP Server

Publishes the AI's memory and functions as MCP tools.

```bash
ai-gpt server [OPTIONS]
```

### Options
- `--host`, `-h`: server host (default: localhost)
- `--port`, `-p`: server port (default: 8000)
- `--model`, `-m`: AI model
- `--provider`: AI provider

### Examples
```bash
# basic startup
ai-gpt server

# custom settings
ai-gpt server --port 8080 --model gpt-4o-mini --provider openai
```
102
docs/concepts.md
@@ -1,102 +0,0 @@
# Core Concepts

## Central Philosophy

ai.gpt is based on the "existence entity" theory (存在子理論) and aims to give an AI a personality with true uniqueness.

### Uniqueness (the yui system)

- **1:1 relationships**: each user (atproto DID) and the AI share a unique relationship
- **Tamper-proof**: a relationship, once formed, cannot be altered
- **Irreversibility**: once a relationship breaks, it cannot be repaired

### Reflecting reality

Just like real human relationships:
- relationships change over time
- negative interactions damage the relationship
- trust is easily lost and hard to regain

## Memory System

### Hierarchy

```
1. Full Log
   ↓ record every conversation
2. Summary
   ↓ the AI extracts the important parts
3. Core
   ↓ the essential facts about the user
4. Forgotten
   unimportant information is forgotten
```

### Memory processing flow

1. **Record**: save every interaction
2. **Score importance**: evaluate by impact on the relationship
3. **Summarize**: periodically compress memories
4. **Core detection**: identify essential memories
5. **Selective forgetting**: delete old, unimportant memories (see the sketch below)
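A minimal sketch of how this hierarchy might be modeled; the enum and field names are illustrative, not the project's actual `memory.py` API:

```python
from dataclasses import dataclass
from enum import Enum

class MemoryLevel(str, Enum):
    FULL_LOG = "full_log"
    SUMMARY = "summary"
    CORE = "core"
    FORGOTTEN = "forgotten"

@dataclass
class Memory:
    content: str
    importance: float  # 0.0-1.0, impact on the relationship
    level: MemoryLevel = MemoryLevel.FULL_LOG

def apply_forgetting(memories: list[Memory], threshold: float = 0.3) -> None:
    """Demote old, unimportant memories; core memories are never forgotten."""
    for m in memories:
        if m.level != MemoryLevel.CORE and m.importance < threshold:
            m.level = MemoryLevel.FORGOTTEN
```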

## Relationship Parameters

### Relationship stages

- `stranger` (0-49): first contact
- `acquaintance` (50-99): acquaintance
- `friend` (100-149): friend
- `close_friend` (150+): close friend
- `broken`: beyond repair (score 0 or below)

### Score changes

- **positive interaction**: +1.0 to +2.0
- **time passing**: -0.1/day (natural decay)
- **negative interaction**: -10.0 or worse causes serious damage
- **daily cap**: at most 10 interactions per day

### Unlocking transmission

Once the relationship score exceeds 100, the AI becomes able to send messages autonomously; the sketch below puts these numbers together.
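A minimal sketch of the scoring rules above; the function names and the handling of the `broken` flag are illustrative assumptions, not the actual `relationship.py` implementation:

```python
TRANSMISSION_THRESHOLD = 100.0
DECAY_PER_DAY = 0.1

def update_score(score: float, delta: float, broken: bool) -> tuple[float, bool]:
    """Apply one interaction; a broken relationship never recovers."""
    if broken:
        return score, True  # irreversible: no repair
    score += delta
    if score <= 0:
        return 0.0, True    # dropping to 0 or below breaks the relationship
    return score, False

def apply_daily_decay(score: float, days: int) -> float:
    return max(score - DECAY_PER_DAY * days, 0.0)

def can_transmit(score: float, broken: bool) -> bool:
    return not broken and score > TRANSMISSION_THRESHOLD
```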

## AI Fortune System

### Daily variation

- a fortune value of 1-10 is rolled randomly every day
- the fortune influences the personality traits
- streaks of lucky/unlucky days trigger breakthroughs

### Effect on personality

On high-fortune days:
- more optimistic and proactive
- higher creativity
- energetic responses

On low-fortune days:
- introspective and careful
- deeper thought
- restrained responses

## Data Persistence

### Storage location

```
~/.config/aigpt/
├── config.json      # settings
└── data/            # AI data
    ├── memories.json       # memories
    ├── relationships.json  # relationships
    ├── fortunes.json       # fortune history
    └── ...
```

### Data sovereignty

- all data is stored locally
- the user is in full control
- eventually to be managed in a distributed way on atproto
@@ -1,141 +0,0 @@
# Configuration Guide

## Config file location

ai.gpt's configuration is stored at `~/.config/syui/ai/gpt/config.json`.

## Virtual environment location

ai.gpt's virtual environment lives at `~/.config/syui/ai/gpt/venv/`, keeping configuration and data in one place.

```bash
# activate the virtual environment
source ~/.config/syui/ai/gpt/venv/bin/activate

# the aigpt command becomes available
aigpt --help
```

## Config structure

```json
{
  "providers": {
    "openai": {
      "api_key": "sk-xxxxx",
      "default_model": "gpt-4o-mini"
    },
    "ollama": {
      "host": "http://localhost:11434",
      "default_model": "qwen2.5"
    }
  },
  "atproto": {
    "handle": "your.handle",
    "password": "your-password",
    "host": "https://bsky.social"
  },
  "default_provider": "ollama"
}
```

## Provider Settings

### OpenAI

```bash
# set the API key
aigpt config set providers.openai.api_key sk-xxxxx

# change the default model
aigpt config set providers.openai.default_model gpt-4-turbo
```

### Ollama

```bash
# change the host (when using a remote Ollama server)
aigpt config set providers.ollama.host http://192.168.1.100:11434

# change the default model
aigpt config set providers.ollama.default_model llama2
```

## atproto Settings (for future auto-posting)

```bash
# Bluesky account
aigpt config set atproto.handle yourhandle.bsky.social
aigpt config set atproto.password your-app-password

# using a self-hosted server
aigpt config set atproto.host https://your-pds.example.com
```

## Default Provider

```bash
# switch the default to OpenAI
aigpt config set default_provider openai
```

## Security

### Protecting API keys

The config file is stored in plain text, so set appropriate file permissions:

```bash
chmod 600 ~/.config/syui/ai/gpt/config.json
```

### Priority versus environment variables

1. command-line options (highest priority)
2. config file
3. environment variables (lowest priority)

Example, for the OpenAI API key (a resolution sketch follows this list):
- the `--api-key` option
- `providers.openai.api_key` in `config.json`
- the `OPENAI_API_KEY` environment variable
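A minimal sketch of that resolution order; the function is illustrative, not the project's actual `config.py`:

```python
import json
import os
from pathlib import Path

CONFIG = Path.home() / ".config" / "syui" / "ai" / "gpt" / "config.json"

def resolve_openai_key(cli_key: str | None = None) -> str | None:
    """CLI option > config file > environment variable."""
    if cli_key:
        return cli_key
    if CONFIG.exists():
        key = (json.loads(CONFIG.read_text())
               .get("providers", {}).get("openai", {}).get("api_key"))
        if key:
            return key
    return os.environ.get("OPENAI_API_KEY")
```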

## Backing Up the Configuration

```bash
# backup
cp ~/.config/syui/ai/gpt/config.json ~/.config/syui/ai/gpt/config.json.backup

# restore
cp ~/.config/syui/ai/gpt/config.json.backup ~/.config/syui/ai/gpt/config.json
```

## Data Directory

Memory data is stored under `~/.config/syui/ai/gpt/data/`:

```bash
ls ~/.config/syui/ai/gpt/data/
# conversations.json  memories.json  relationships.json  personas.json
```

Back these files up just like the configuration.

## Troubleshooting

### Settings not taking effect

```bash
# check the current settings
aigpt config list

# check a specific key
aigpt config get providers.openai.api_key
```

### Resetting the configuration

```bash
# delete the config file (it is recreated on the next run)
rm ~/.config/syui/ai/gpt/config.json
```
@@ -1,167 +0,0 @@
# Developer Guide

## Architecture

### Directory layout

```
ai_gpt/
├── src/ai_gpt/
│   ├── __init__.py
│   ├── models.py        # data model definitions
│   ├── memory.py        # memory management system
│   ├── relationship.py  # relationship tracker
│   ├── fortune.py       # AI fortune system
│   ├── persona.py       # unified persona system
│   ├── transmission.py  # transmission controller
│   ├── scheduler.py     # scheduler
│   ├── config.py        # configuration management
│   ├── ai_provider.py   # AI integration (Ollama/OpenAI)
│   ├── mcp_server.py    # MCP server implementation
│   └── cli.py           # CLI interface
├── docs/                # documentation
├── tests/               # tests
└── pyproject.toml       # project settings
```

### Main components

#### MemoryManager
Implements the hierarchical memory system. Records conversations and manages summarization, core detection, and forgetting.

```python
memory = MemoryManager(data_dir)
memory.add_conversation(conversation)
memory.summarize_memories(user_id)
memory.identify_core_memories()
memory.apply_forgetting()
```

#### RelationshipTracker
Tracks relationships with users. Implements irreversible damage and time decay.

```python
tracker = RelationshipTracker(data_dir)
relationship = tracker.update_interaction(user_id, delta)
tracker.apply_time_decay()
```

#### Persona
Integrates all components and provides a consistent personality.

```python
persona = Persona(data_dir)
response, delta = persona.process_interaction(user_id, message)
state = persona.get_current_state()
```

## Extending the System

### Adding a new AI provider

1. Create a new provider class in `ai_provider.py`:

```python
class CustomProvider:
    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        # implementation
        pass
```

2. Add it to the `create_ai_provider` function:

```python
def create_ai_provider(provider: str, model: str, **kwargs):
    if provider == "custom":
        return CustomProvider(model=model, **kwargs)
    # ...
```

### Adding a new scheduled task

1. Add it to the `TaskType` enum:

```python
class TaskType(str, Enum):
    CUSTOM_TASK = "custom_task"
```

2. Implement the handler:

```python
async def _handle_custom_task(self, task: ScheduledTask):
    # task implementation
    pass
```

3. Register it in `task_handlers`:

```python
self.task_handlers[TaskType.CUSTOM_TASK] = self._handle_custom_task
```

### Adding a new MCP tool

Add it in the `_register_tools` method of `mcp_server.py`:

```python
@self.server.tool("custom_tool")
async def custom_tool(param1: str, param2: int) -> Dict[str, Any]:
    """Description of the custom tool"""
    # implementation
    return {"result": "value"}
```

## Testing

```bash
# run the tests (to be implemented)
pytest tests/

# a specific test
pytest tests/test_memory.py
```

## Debugging

### Setting the log level

```python
import logging
logging.basicConfig(level=logging.DEBUG)
```

### Inspecting the data files directly

```bash
# check relationship data
cat ~/.config/aigpt/data/relationships.json | jq

# check memory data
cat ~/.config/aigpt/data/memories.json | jq
```

## Contributing

1. Fork the repository
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add amazing feature'`)
4. Push the branch (`git push origin feature/amazing-feature`)
5. Open a pull request

## Design Principles

1. **Irreversibility**: a relationship, once lost, does not recover
2. **Hierarchy**: memories are layered by importance
3. **Autonomy**: the AI acts on its own based on relationships
4. **Uniqueness**: each relationship with a user is one of a kind

## License

MIT License
@@ -1,110 +0,0 @@
# MCP Server

## Overview

The MCP (Model Context Protocol) server publishes ai.gpt's memory and functions as AI tools. This lets MCP-capable AI assistants such as Claude Desktop access ai.gpt's features.

## Starting It

```bash
# basic startup
ai-gpt server

# custom settings
ai-gpt server --host 0.0.0.0 --port 8080 --model gpt-4o-mini --provider openai
```

## Available Tools

### get_memories
Fetches active memories.

**Parameters**:
- `user_id` (optional): memories about a specific user
- `limit`: maximum number of memories to fetch (default: 10)

**Returns**: list of memories (ID, content, level, importance, core flag, timestamp)

### get_relationship
Fetches the relationship with a specific user.

**Parameters**:
- `user_id`: user ID (required)

**Returns**: relationship info (status, score, transmission eligibility, total interactions, etc.)

### get_all_relationships
Fetches all relationships.

**Returns**: list of relationships with every user

### get_persona_state
Fetches the current persona state.

**Returns**:
- current mood
- today's fortune
- personality trait values
- number of active memories

### process_interaction
Processes an interaction with a user.

**Parameters**:
- `user_id`: user ID
- `message`: message content

**Returns**:
- the AI's response
- the change in the relationship
- the new relationship score
- transmission status

### check_transmission_eligibility
Checks whether transmission to a specific user is allowed.

**Parameters**:
- `user_id`: user ID

**Returns**: eligibility and relationship info

### get_fortune
Fetches today's AI fortune.

**Returns**: fortune value, streak length, breakthrough status, effect on personality

### summarize_memories
Creates a memory summary.

**Parameters**:
- `user_id`: user ID

**Returns**: the created summary (if any)

### run_maintenance
Runs the daily maintenance.

**Returns**: execution status
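Because these tools are HTTP endpoints on the server above, they can also be driven programmatically. A minimal sketch using httpx, assuming the server runs on port 8001 and that `process_interaction` accepts its parameters as query arguments (an assumption; the generated /docs page shows the real schema):

```python
import httpx

def process_interaction(user_id: str, message: str) -> dict:
    # Assumed endpoint shape; check the FastAPI /docs page for the real schema.
    resp = httpx.get(
        "http://localhost:8001/process_interaction",
        params={"user_id": user_id, "message": message},
        timeout=30.0,
    )
    resp.raise_for_status()
    return resp.json()

print(process_interaction("did:plc:user123", "こんにちは"))
```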

## Configuring Claude Desktop

`~/Library/Application Support/Claude/claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "ai-gpt": {
      "command": "ai-gpt",
      "args": ["server", "--port", "8001"],
      "env": {}
    }
  }
}
```

## Usage Example

### From an AI assistant

```
User: Check your relationship with me in ai.gpt
```
@@ -1,69 +0,0 @@
# Quick Start

## Installation

```bash
# clone the repository
git clone https://github.com/yourusername/ai_gpt.git
cd ai_gpt

# install
pip install -e .
```

## Initial Setup

### 1. Using OpenAI

```bash
# set the API key
ai-gpt config set providers.openai.api_key sk-xxxxx
```

### 2. Using Ollama (local LLM)

```bash
# install Ollama (if you haven't)
# download it from https://ollama.ai

# pull a model
ollama pull qwen2.5
```

## Basic Usage

### 1. Talk to the AI

```bash
# simple conversation (using Ollama)
ai-gpt chat "did:plc:user123" "こんにちは!"

# using OpenAI
ai-gpt chat "did:plc:user123" "今日はどんな気分?" --provider openai --model gpt-4o-mini
```

### 2. Check relationships

```bash
# relationship with a specific user
ai-gpt status "did:plc:user123"

# overall AI state
ai-gpt status
```

### 3. Set up automatic transmission

```bash
# check transmissions every 30 minutes
ai-gpt schedule add transmission_check "30m"

# start the scheduler
ai-gpt schedule run
```

## Next Steps

- [Core Concepts](concepts.md) - understand how the system works
- [Command Reference](commands.md) - details of every command
- [Configuration Guide](configuration.md) - detailed configuration
@@ -1,168 +0,0 @@
# Scheduler Guide

## Overview

The scheduler is the core mechanism behind the AI's autonomous behavior. You can register recurring tasks and run them in the background.

## Task Types

### transmission_check
Checks for automatic transmission to users whose relationship exceeds the threshold.

```bash
# check every 30 minutes
ai-gpt schedule add transmission_check "30m" --provider ollama --model qwen2.5
```

### maintenance
Runs the daily maintenance:
- memory forgetting
- core memory detection
- relationship parameter cleanup

```bash
# every day at 3 AM
ai-gpt schedule add maintenance "0 3 * * *"
```

### fortune_update
Updates the AI fortune (normally updated automatically).

```bash
# force an update every day at midnight
ai-gpt schedule add fortune_update "0 0 * * *"
```

### relationship_decay
Applies the natural decay of relationships over time.

```bash
# decay every hour
ai-gpt schedule add relationship_decay "1h"
```

### memory_summary
Creates summaries from accumulated memories.

```bash
# once a week, on Sundays
ai-gpt schedule add memory_summary "0 0 * * SUN"
```

## Schedule Formats

### Cron format

Standard cron expressions are supported:

```
┌───────────── minute (0 - 59)
│ ┌───────────── hour (0 - 23)
│ │ ┌───────────── day of month (1 - 31)
│ │ │ ┌───────────── month (1 - 12)
│ │ │ │ ┌───────────── day of week (0 - 6) (Sunday = 0)
│ │ │ │ │
* * * * *
```

Examples:
- `"0 */6 * * *"` - every 6 hours
- `"0 9 * * MON-FRI"` - weekdays at 9 AM
- `"*/15 * * * *"` - every 15 minutes

### Interval format

Simple interval specifiers (parsed as sketched after this list):
- `"30s"` - every 30 seconds
- `"5m"` - every 5 minutes
- `"2h"` - every 2 hours
- `"1d"` - every day
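A minimal sketch of how such interval strings can be parsed into seconds; this is an illustration, not the project's actual scheduler code (which builds on apscheduler/croniter):

```python
import re

UNITS = {"s": 1, "m": 60, "h": 3600, "d": 86400}

def parse_interval(spec: str) -> int:
    """'30m' -> 1800; raises ValueError for anything else."""
    m = re.fullmatch(r"(\d+)([smhd])", spec)
    if not m:
        raise ValueError(f"Invalid interval: {spec}")
    value, unit = m.groups()
    return int(value) * UNITS[unit]

assert parse_interval("30m") == 1800
assert parse_interval("2h") == 7200
```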

## Worked Examples

### A basic autonomous AI setup

```bash
# 1. check transmissions every 30 minutes
ai-gpt schedule add transmission_check "30m"

# 2. maintenance once a day
ai-gpt schedule add maintenance "0 3 * * *"

# 3. relationship decay every 2 hours
ai-gpt schedule add relationship_decay "2h"

# 4. memory summaries once a week
ai-gpt schedule add memory_summary "0 0 * * MON"

# start the scheduler
ai-gpt schedule run
```

### Task management

```bash
# list tasks
ai-gpt schedule list

# pause a task
ai-gpt schedule disable --task-id transmission_check_1234567890

# resume a task
ai-gpt schedule enable --task-id transmission_check_1234567890

# remove an unneeded task
ai-gpt schedule remove --task-id old_task_123
```

## Running as a Daemon

### As a systemd service

`/etc/systemd/system/ai-gpt-scheduler.service`:

```ini
[Unit]
Description=ai.gpt Scheduler
After=network.target

[Service]
Type=simple
User=youruser
WorkingDirectory=/home/youruser
ExecStart=/usr/local/bin/ai-gpt schedule run
Restart=always

[Install]
WantedBy=multi-user.target
```

```bash
# enable the service
sudo systemctl enable ai-gpt-scheduler
sudo systemctl start ai-gpt-scheduler
```

### In the background with tmux/screen

```bash
# create a tmux session
tmux new -s ai-gpt-scheduler

# start the scheduler
ai-gpt schedule run

# detach from the session (Ctrl+B, D)
```

## Troubleshooting

### Tasks not running

1. Check that the scheduler is running
2. Check that the task is enabled: `ai-gpt schedule list`
3. Check the logs (to be implemented)

### Avoiding duplicate runs

Be careful not to add the same task type more than once. If needed, remove the old task before adding a new one.
@@ -1,413 +0,0 @@
"""
Shell Tools

Integrates the existing ai.shell features as MCP tools:
- code generation
- file analysis
- project management
- LLM integration
"""

from typing import Dict, Any, List, Optional
import os
import subprocess
import tempfile
from pathlib import Path
import requests
from .base_tools import BaseMCPTool, config_manager


class ShellTools(BaseMCPTool):
    """Shell tools (the former ai.shell features)"""

    def __init__(self, config_dir: Optional[str] = None):
        super().__init__(config_dir)
        self.ollama_url = "http://localhost:11434"

    async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
        """Generate code with the local LLM"""
        config = config_manager.load_config()
        model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")

        system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."

        try:
            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json={
                    "model": model,
                    "prompt": f"{system_prompt}\n\nUser: {prompt}\n\nPlease provide the code:",
                    "stream": False,
                    "options": {
                        "temperature": 0.1,
                        "top_p": 0.95,
                    }
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                code = result.get("response", "")
                return {"code": code, "language": language}
            else:
                return {"error": f"Ollama returned status {response.status_code}"}

        except Exception as e:
            return {"error": str(e)}

    async def analyze_file(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
        """Analyze a file"""
        try:
            if not os.path.exists(file_path):
                return {"error": f"File not found: {file_path}"}

            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # detect the language from the file extension
            ext = Path(file_path).suffix
            language_map = {
                '.py': 'python',
                '.rs': 'rust',
                '.js': 'javascript',
                '.ts': 'typescript',
                '.go': 'go',
                '.java': 'java',
                '.cpp': 'cpp',
                '.c': 'c',
                '.sh': 'shell',
                '.toml': 'toml',
                '.json': 'json',
                '.md': 'markdown'
            }
            language = language_map.get(ext, 'text')

            config = config_manager.load_config()
            model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")

            prompt = f"{analysis_prompt}\n\nFile: {file_path}\nLanguage: {language}\n\nContent:\n{content}"

            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                analysis = result.get("response", "")
                return {
                    "analysis": analysis,
                    "file_path": file_path,
                    "language": language,
                    "file_size": len(content),
                    "line_count": len(content.split('\n'))
                }
            else:
                return {"error": f"Analysis failed: {response.status_code}"}

        except Exception as e:
            return {"error": str(e)}

    async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
        """Explain code"""
        config = config_manager.load_config()
        model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")

        prompt = f"Explain this {language} code in detail:\n\n{code}"

        try:
            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                explanation = result.get("response", "")
                return {"explanation": explanation}
            else:
                return {"error": f"Explanation failed: {response.status_code}"}

        except Exception as e:
            return {"error": str(e)}

    async def create_project(self, project_type: str, project_name: str, location: str = ".") -> Dict[str, Any]:
        """Create a project"""
        try:
            project_path = Path(location) / project_name

            if project_path.exists():
                return {"error": f"Project directory already exists: {project_path}"}

            project_path.mkdir(parents=True, exist_ok=True)

            # create a template for the given project type
            if project_type == "rust":
                await self._create_rust_project(project_path)
            elif project_type == "python":
                await self._create_python_project(project_path)
            elif project_type == "node":
                await self._create_node_project(project_path)
            else:
                # basic project structure
                (project_path / "src").mkdir()
                (project_path / "README.md").write_text(f"# {project_name}\n\nA new {project_type} project.")

            return {
                "status": "success",
                "project_path": str(project_path),
                "project_type": project_type,
                "files_created": list(self._get_project_files(project_path))
            }

        except Exception as e:
            return {"error": str(e)}

    async def _create_rust_project(self, project_path: Path):
        """Create a Rust project"""
        # Cargo.toml
        cargo_toml = f"""[package]
name = "{project_path.name}"
version = "0.1.0"
edition = "2021"

[dependencies]
"""
        (project_path / "Cargo.toml").write_text(cargo_toml)

        # src/main.rs
        src_dir = project_path / "src"
        src_dir.mkdir()
        (src_dir / "main.rs").write_text('fn main() {\n    println!("Hello, world!");\n}\n')

        # README.md
        (project_path / "README.md").write_text(f"# {project_path.name}\n\nA Rust project.")

    async def _create_python_project(self, project_path: Path):
        """Create a Python project"""
        # pyproject.toml
        pyproject_toml = f"""[project]
name = "{project_path.name}"
version = "0.1.0"
description = "A Python project"
requires-python = ">=3.8"
dependencies = []

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
"""
        (project_path / "pyproject.toml").write_text(pyproject_toml)

        # src/
        src_dir = project_path / "src" / project_path.name
        src_dir.mkdir(parents=True)
        (src_dir / "__init__.py").write_text("")
        (src_dir / "main.py").write_text('def main():\n    print("Hello, world!")\n\nif __name__ == "__main__":\n    main()\n')

        # README.md
        (project_path / "README.md").write_text(f"# {project_path.name}\n\nA Python project.")

    async def _create_node_project(self, project_path: Path):
        """Create a Node.js project"""
        # package.json
        package_json = f"""{{
  "name": "{project_path.name}",
  "version": "1.0.0",
  "description": "A Node.js project",
  "main": "index.js",
  "scripts": {{
    "start": "node index.js",
    "test": "echo \\"Error: no test specified\\" && exit 1"
  }},
  "dependencies": {{}}
}}
"""
        (project_path / "package.json").write_text(package_json)

        # index.js
        (project_path / "index.js").write_text('console.log("Hello, world!");\n')

        # README.md
        (project_path / "README.md").write_text(f"# {project_path.name}\n\nA Node.js project.")

    def _get_project_files(self, project_path: Path) -> List[str]:
        """List the files inside a project"""
        files = []
        for file_path in project_path.rglob("*"):
            if file_path.is_file():
                files.append(str(file_path.relative_to(project_path)))
        return files

    async def execute_command(self, command: str, working_dir: str = ".") -> Dict[str, Any]:
        """Run a shell command"""
        try:
            result = subprocess.run(
                command,
                shell=True,
                cwd=working_dir,
                capture_output=True,
                text=True,
                timeout=60
            )

            return {
                "status": "success" if result.returncode == 0 else "error",
                "returncode": result.returncode,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "command": command,
                "working_dir": working_dir
            }

        except subprocess.TimeoutExpired:
            return {"error": "Command timed out"}
        except Exception as e:
            return {"error": str(e)}

    async def write_file(self, file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
        """Write a file (with an optional backup)"""
        try:
            file_path_obj = Path(file_path)

            # create a backup
            backup_path = None
            if backup and file_path_obj.exists():
                backup_path = f"{file_path}.backup"
                with open(file_path, 'r', encoding='utf-8') as src:
                    with open(backup_path, 'w', encoding='utf-8') as dst:
                        dst.write(src.read())

            # write the file
            file_path_obj.parent.mkdir(parents=True, exist_ok=True)
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)

            return {
                "status": "success",
                "file_path": file_path,
                "backup_path": backup_path,
                "bytes_written": len(content.encode('utf-8'))
            }

        except Exception as e:
            return {"error": str(e)}

    def get_tools(self) -> List[Dict[str, Any]]:
        """List the available tools"""
        return [
            {
                "name": "generate_code",
                "description": "Generate code with the local LLM",
                "parameters": {
                    "prompt": "string",
                    "language": "string (optional, default: python)"
                }
            },
            {
                "name": "analyze_file",
                "description": "Analyze a file",
                "parameters": {
                    "file_path": "string",
                    "analysis_prompt": "string (optional)"
                }
            },
            {
                "name": "explain_code",
                "description": "Explain code",
                "parameters": {
                    "code": "string",
                    "language": "string (optional, default: python)"
                }
            },
            {
                "name": "create_project",
                "description": "Create a new project",
                "parameters": {
                    "project_type": "string (rust/python/node)",
                    "project_name": "string",
                    "location": "string (optional, default: .)"
                }
            },
            {
                "name": "execute_command",
                "description": "Run a shell command",
                "parameters": {
                    "command": "string",
                    "working_dir": "string (optional, default: .)"
                }
            },
            {
                "name": "write_file",
                "description": "Write a file",
                "parameters": {
                    "file_path": "string",
                    "content": "string",
                    "backup": "boolean (optional, default: true)"
                }
            }
        ]

    async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a tool"""
        try:
            if tool_name == "generate_code":
                result = await self.code_with_local_llm(
                    prompt=params["prompt"],
                    language=params.get("language", "python")
                )
                return result

            elif tool_name == "analyze_file":
                result = await self.analyze_file(
                    file_path=params["file_path"],
                    analysis_prompt=params.get("analysis_prompt", "Analyze this file")
                )
                return result

            elif tool_name == "explain_code":
                result = await self.explain_code(
                    code=params["code"],
                    language=params.get("language", "python")
                )
                return result

            elif tool_name == "create_project":
                result = await self.create_project(
                    project_type=params["project_type"],
                    project_name=params["project_name"],
                    location=params.get("location", ".")
                )
                return result

            elif tool_name == "execute_command":
                result = await self.execute_command(
                    command=params["command"],
                    working_dir=params.get("working_dir", ".")
                )
                return result

            elif tool_name == "write_file":
                result = await self.write_file(
                    file_path=params["file_path"],
                    content=params["content"],
                    backup=params.get("backup", True)
                )
                return result

            else:
                return {"error": f"Unknown tool: {tool_name}"}

        except Exception as e:
            return {"error": str(e)}
@@ -1,33 +0,0 @@
[project]
name = "aigpt"
version = "0.1.0"
description = "Autonomous transmission AI with unique personality based on relationship parameters"
requires-python = ">=3.10"
dependencies = [
    "click>=8.0.0",
    "typer>=0.9.0",
    "fastapi-mcp>=0.1.0",
    "pydantic>=2.0.0",
    "httpx>=0.24.0",
    "rich>=13.0.0",
    "python-dotenv>=1.0.0",
    "ollama>=0.1.0",
    "openai>=1.0.0",
    "uvicorn>=0.23.0",
    "apscheduler>=3.10.0",
    "croniter>=1.3.0",
    "prompt-toolkit>=3.0.0",
]

[project.scripts]
aigpt = "aigpt.cli:app"

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-data]
aigpt = ["data/*.json"]
26
scpt/test_commands.sh
Executable file
@@ -0,0 +1,26 @@
#!/bin/bash

echo "=== Testing aigpt-rs CLI commands ==="
echo

echo "1. Testing configuration loading:"
cargo run --bin test-config
echo

echo "2. Testing fortune command:"
cargo run --bin aigpt-rs -- fortune
echo

echo "3. Testing chat with Ollama:"
cargo run --bin aigpt-rs -- chat test_user "Hello from Rust!" --provider ollama --model qwen2.5-coder:latest
echo

echo "4. Testing chat with OpenAI:"
cargo run --bin aigpt-rs -- chat test_user "What's the capital of Japan?" --provider openai --model gpt-4o-mini
echo

echo "5. Testing relationships command:"
cargo run --bin aigpt-rs -- relationships
echo

echo "=== All tests completed ==="
19
scpt/test_completion.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash

echo "=== Testing aigpt-rs shell tab completion ==="
echo
echo "To test tab completion, run:"
echo "cargo run --bin aigpt-rs -- shell syui"
echo
echo "Then try these commands and press Tab:"
echo "  /st[TAB]  -> should complete to /status"
echo "  /mem[TAB] -> should complete to /memories"
echo "  !l[TAB]   -> should complete to !ls"
echo "  !g[TAB]   -> should show !git, !grep"
echo
echo "Manual test instructions:"
echo "1. Type '/st' and press TAB - should complete to '/status'"
echo "2. Type '!l' and press TAB - should complete to '!ls'"
echo "3. Type '!g' and press TAB - should show git/grep options"
echo
echo "Run the shell now..."
18
scpt/test_shell.sh
Normal file
@@ -0,0 +1,18 @@
#!/bin/bash

echo "=== Testing aigpt-rs shell functionality ==="
echo

echo "1. Testing shell command with help:"
echo "help" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo

echo "2. Testing basic commands:"
echo -e "!pwd\n!ls\nexit" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo

echo "3. Testing AI commands:"
echo -e "/status\n/fortune\nexit" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo

echo "=== Shell tests completed ==="
22
scpt/test_shell_manual.sh
Executable file
@@ -0,0 +1,22 @@
#!/bin/bash

echo "=== Testing aigpt-rs shell manually ==="
echo

# Test with echo to simulate input
echo "Testing with simple command..."
echo "/status" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo

echo "Testing with help command..."
echo "help" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo

echo "Testing with AI message..."
echo "Hello AI" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo

echo "=== Manual shell tests completed ==="
@@ -1,23 +0,0 @@
#!/bin/zsh
# Setup Python virtual environment in the new config directory

VENV_DIR="$HOME/.config/syui/ai/gpt/venv"

echo "Creating Python virtual environment at: $VENV_DIR"
python -m venv "$VENV_DIR"

echo "Activating virtual environment..."
source "$VENV_DIR/bin/activate"

echo "Installing aigpt package..."
cd "$(dirname "$0")"
pip install -e .

echo "Setup complete!"
echo "To activate the virtual environment, run:"
echo "source ~/.config/syui/ai/gpt/venv/bin/activate"

if [ -z "`$SHELL -i -c \"alias aigpt\"`" ]; then
    echo 'alias aigpt="$HOME/.config/syui/ai/gpt/venv/bin/aigpt"' >> ${HOME}/.$(basename $SHELL)rc
    exec $SHELL
fi
1
shell
Submodule shell deleted from 81ae0037d9
246
src/ai_provider.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
use anyhow::{Result, anyhow};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum AIProvider {
|
||||
OpenAI,
|
||||
Ollama,
|
||||
Claude,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for AIProvider {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
AIProvider::OpenAI => write!(f, "openai"),
|
||||
AIProvider::Ollama => write!(f, "ollama"),
|
||||
AIProvider::Claude => write!(f, "claude"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for AIProvider {
|
||||
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        match s.to_lowercase().as_str() {
            "openai" | "gpt" => Ok(AIProvider::OpenAI),
            "ollama" => Ok(AIProvider::Ollama),
            "claude" => Ok(AIProvider::Claude),
            _ => Err(anyhow!("Unknown AI provider: {}", s)),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIConfig {
    pub provider: AIProvider,
    pub model: String,
    pub api_key: Option<String>,
    pub base_url: Option<String>,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
}

impl Default for AIConfig {
    fn default() -> Self {
        AIConfig {
            provider: AIProvider::Ollama,
            model: "llama2".to_string(),
            api_key: None,
            base_url: Some("http://localhost:11434".to_string()),
            max_tokens: Some(2048),
            temperature: Some(0.7),
        }
    }
}

#[derive(Debug, Clone)]
pub struct ChatMessage {
    pub role: String,
    pub content: String,
}

#[derive(Debug, Clone)]
pub struct ChatResponse {
    pub content: String,
    pub tokens_used: Option<u32>,
    pub model: String,
}

pub struct AIProviderClient {
    config: AIConfig,
    http_client: reqwest::Client,
}

impl AIProviderClient {
    pub fn new(config: AIConfig) -> Self {
        let http_client = reqwest::Client::new();

        AIProviderClient {
            config,
            http_client,
        }
    }

    pub async fn chat(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        match self.config.provider {
            AIProvider::OpenAI => self.chat_openai(messages, system_prompt).await,
            AIProvider::Ollama => self.chat_ollama(messages, system_prompt).await,
            AIProvider::Claude => self.chat_claude(messages, system_prompt).await,
        }
    }

    async fn chat_openai(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        let api_key = self.config.api_key.as_ref()
            .ok_or_else(|| anyhow!("OpenAI API key required"))?;

        let mut request_messages = Vec::new();

        // Add system prompt if provided
        if let Some(system) = system_prompt {
            request_messages.push(serde_json::json!({
                "role": "system",
                "content": system
            }));
        }

        // Add conversation messages
        for msg in messages {
            request_messages.push(serde_json::json!({
                "role": msg.role,
                "content": msg.content
            }));
        }

        let request_body = serde_json::json!({
            "model": self.config.model,
            "messages": request_messages,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature
        });

        let response = self.http_client
            .post("https://api.openai.com/v1/chat/completions")
            .header("Authorization", format!("Bearer {}", api_key))
            .header("Content-Type", "application/json")
            .json(&request_body)
            .send()
            .await?;

        if !response.status().is_success() {
            let error_text = response.text().await?;
            return Err(anyhow!("OpenAI API error: {}", error_text));
        }

        let response_json: serde_json::Value = response.json().await?;

        let content = response_json["choices"][0]["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow!("Invalid OpenAI response format"))?
            .to_string();

        let tokens_used = response_json["usage"]["total_tokens"]
            .as_u64()
            .map(|t| t as u32);

        Ok(ChatResponse {
            content,
            tokens_used,
            model: self.config.model.clone(),
        })
    }

    async fn chat_ollama(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        let default_url = "http://localhost:11434".to_string();
        let base_url = self.config.base_url.as_ref()
            .unwrap_or(&default_url);

        let mut request_messages = Vec::new();

        // Add system prompt if provided
        if let Some(system) = system_prompt {
            request_messages.push(serde_json::json!({
                "role": "system",
                "content": system
            }));
        }

        // Add conversation messages
        for msg in messages {
            request_messages.push(serde_json::json!({
                "role": msg.role,
                "content": msg.content
            }));
        }

        let request_body = serde_json::json!({
            "model": self.config.model,
            "messages": request_messages,
            "stream": false
        });

        let url = format!("{}/api/chat", base_url);
        let response = self.http_client
            .post(&url)
            .header("Content-Type", "application/json")
            .json(&request_body)
            .send()
            .await?;

        if !response.status().is_success() {
            let error_text = response.text().await?;
            return Err(anyhow!("Ollama API error: {}", error_text));
        }

        let response_json: serde_json::Value = response.json().await?;

        let content = response_json["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow!("Invalid Ollama response format"))?
            .to_string();

        Ok(ChatResponse {
            content,
            tokens_used: None, // Ollama doesn't typically return token counts
            model: self.config.model.clone(),
        })
    }

    async fn chat_claude(&self, _messages: Vec<ChatMessage>, _system_prompt: Option<String>) -> Result<ChatResponse> {
        // Claude API implementation would go here
        // For now, return a placeholder
        Err(anyhow!("Claude provider not yet implemented"))
    }

    pub fn get_model(&self) -> &str {
        &self.config.model
    }

    pub fn get_provider(&self) -> &AIProvider {
        &self.config.provider
    }
}

// Convenience functions for creating common message types
impl ChatMessage {
    pub fn user(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "user".to_string(),
            content: content.into(),
        }
    }

    pub fn assistant(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "assistant".to_string(),
            content: content.into(),
        }
    }

    pub fn system(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "system".to_string(),
            content: content.into(),
        }
    }
}
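For orientation, a minimal usage sketch of the client defined above. This is not part of the diff: it assumes a tokio async context and a running local Ollama server, and the function name demo_chat is illustrative only.

// Sketch only: wiring AIConfig, ChatMessage, and AIProviderClient together.
async fn demo_chat() -> anyhow::Result<()> {
    // Defaults from AIConfig::default(): Ollama, "llama2", localhost:11434
    let client = AIProviderClient::new(AIConfig::default());

    // Build a conversation with the ChatMessage helpers above
    let messages = vec![ChatMessage::user("hello")];
    let response = client.chat(messages, Some("Respond briefly.".to_string())).await?;

    println!("{} ({})", response.content, response.model);
    Ok(())
}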
@@ -1,18 +0,0 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0
Requires-Dist: prompt-toolkit>=3.0.0
@@ -1,23 +0,0 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/chatgpt_importer.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/mcp_server_simple.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/project_manager.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt
@@ -1 +0,0 @@
@@ -1,2 +0,0 @@
[console_scripts]
aigpt = aigpt.cli:app
@@ -1,13 +0,0 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0
prompt-toolkit>=3.0.0
@@ -1 +0,0 @@
aigpt
@@ -1,15 +0,0 @@
"""ai.gpt - Autonomous transmission AI with unique personality"""

__version__ = "0.1.0"

from .memory import MemoryManager
from .relationship import RelationshipTracker
from .persona import Persona
from .transmission import TransmissionController

__all__ = [
    "MemoryManager",
    "RelationshipTracker",
    "Persona",
    "TransmissionController",
]
@@ -1,207 +0,0 @@
"""AI Provider integration for response generation"""

import os
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
import httpx
from openai import OpenAI
import ollama

from .models import PersonaState, Memory
from .config import Config


class AIProvider(Protocol):
    """Protocol for AI providers"""

    @abstractmethod
    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate a response based on prompt and context"""
        pass


class OllamaProvider:
    """Ollama AI provider"""

    def __init__(self, model: str = "qwen2.5", host: Optional[str] = None):
        self.model = model
        # Use environment variable OLLAMA_HOST if available, otherwise use config or default
        self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
        # Ensure proper URL format
        if not self.host.startswith('http'):
            self.host = f'http://{self.host}'
        self.client = ollama.Client(host=self.host, timeout=60.0)  # 60-second timeout
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")

    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate response using Ollama"""

        # Build context from memories
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.content[:200]}..."
            for mem in memories[:5]
        ])

        # Build personality context
        personality_desc = ", ".join([
            f"{trait}: {value:.1f}"
            for trait, value in persona_state.base_personality.items()
        ])

        # System prompt with persona context
        full_system_prompt = f"""You are an AI with the following characteristics:
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}

Recent memories:
{memory_context}

{system_prompt or 'Respond naturally based on your current state and memories.'}"""

        try:
            response = self.client.chat(
                model=self.model,
                messages=[
                    {"role": "system", "content": full_system_prompt},
                    {"role": "user", "content": prompt}
                ]
            )
            return response['message']['content']
        except Exception as e:
            self.logger.error(f"Ollama generation failed: {e}")
            return self._fallback_response(persona_state)

    def chat(self, prompt: str, max_tokens: int = 200) -> str:
        """Simple chat interface"""
        try:
            response = self.client.chat(
                model=self.model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                options={
                    "num_predict": max_tokens,
                    "temperature": 0.7,
                    "top_p": 0.9,
                },
                stream=False  # disable streaming for stability
            )
            return response['message']['content']
        except Exception as e:
            self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
            return "I'm having trouble connecting to the AI model."

    def _fallback_response(self, persona_state: PersonaState) -> str:
        """Fallback response based on mood"""
        mood_responses = {
            "joyful": "That's wonderful! I'm feeling great today!",
            "cheerful": "That sounds nice!",
            "neutral": "I understand.",
            "melancholic": "I see... That's something to think about.",
            "contemplative": "Hmm, let me consider that..."
        }
        return mood_responses.get(persona_state.current_mood, "I see.")


class OpenAIProvider:
    """OpenAI API provider"""

    def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None):
        self.model = model
        # Try to get API key from config first
        config = Config()
        self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
        self.client = OpenAI(api_key=self.api_key)
        self.logger = logging.getLogger(__name__)

    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate response using OpenAI"""

        # Build context similar to Ollama
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.content[:200]}..."
            for mem in memories[:5]
        ])

        personality_desc = ", ".join([
            f"{trait}: {value:.1f}"
            for trait, value in persona_state.base_personality.items()
        ])

        full_system_prompt = f"""You are an AI with unique personality traits and memories.
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}

Recent memories:
{memory_context}

{system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": full_system_prompt},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05  # Vary by fortune
            )
            return response.choices[0].message.content
        except Exception as e:
            self.logger.error(f"OpenAI generation failed: {e}")
            return self._fallback_response(persona_state)

    def _fallback_response(self, persona_state: PersonaState) -> str:
        """Fallback response based on mood"""
        mood_responses = {
            "joyful": "What a delightful conversation!",
            "cheerful": "That's interesting!",
            "neutral": "I understand what you mean.",
            "melancholic": "I've been thinking about that too...",
            "contemplative": "That gives me something to ponder..."
        }
        return mood_responses.get(persona_state.current_mood, "I see.")


def create_ai_provider(provider: str = "ollama", model: str = "qwen2.5", **kwargs) -> AIProvider:
    """Factory function to create AI providers"""
    if provider == "ollama":
        # Try to get host from config if not provided in kwargs
        if 'host' not in kwargs:
            try:
                from .config import Config
                config = Config()
                config_host = config.get('providers.ollama.host')
                if config_host:
                    kwargs['host'] = config_host
            except:
                pass  # Use environment variable or default
        return OllamaProvider(model=model, **kwargs)
    elif provider == "openai":
        return OpenAIProvider(model=model, **kwargs)
    else:
        raise ValueError(f"Unknown provider: {provider}")
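As a cross-reference, the deleted Python factory above has a rough counterpart in the Rust implementation earlier in this diff. The sketch below is an assumption, not code from the repo: it mirrors the Python function's name for illustration and assumes it lives in the same module as AIConfig and AIProviderClient.

// Sketch: Rust equivalent of create_ai_provider(), built from the
// FromStr impl and AIConfig shown earlier in this diff.
fn create_ai_provider(provider: &str, model: &str) -> anyhow::Result<AIProviderClient> {
    let provider: AIProvider = provider.parse()?; // "ollama" | "openai" | "claude"
    let config = AIConfig {
        provider,
        model: model.to_string(),
        ..AIConfig::default() // keep default base_url, max_tokens, temperature
    };
    Ok(AIProviderClient::new(config))
}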
@@ -1,192 +0,0 @@
"""ChatGPT conversation data importer for ai.gpt"""

import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging

from .models import Memory, MemoryLevel, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker

logger = logging.getLogger(__name__)


class ChatGPTImporter:
    """Import ChatGPT conversation data into ai.gpt memory system"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.memory_manager = MemoryManager(data_dir)
        self.relationship_tracker = RelationshipTracker(data_dir)

    def import_from_file(self, file_path: Path, user_id: str = "chatgpt_user") -> Dict[str, Any]:
        """Import ChatGPT conversations from JSON file

        Args:
            file_path: Path to ChatGPT export JSON file
            user_id: User ID to associate with imported conversations

        Returns:
            Dict with import statistics
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                chatgpt_data = json.load(f)

            return self._import_conversations(chatgpt_data, user_id)

        except Exception as e:
            logger.error(f"Failed to import ChatGPT data: {e}")
            raise

    def _import_conversations(self, chatgpt_data: List[Dict], user_id: str) -> Dict[str, Any]:
        """Import multiple conversations from ChatGPT data"""
        stats = {
            "conversations_imported": 0,
            "messages_imported": 0,
            "user_messages": 0,
            "assistant_messages": 0,
            "skipped_messages": 0
        }

        for conversation_data in chatgpt_data:
            try:
                conv_stats = self._import_single_conversation(conversation_data, user_id)

                # Update overall stats
                stats["conversations_imported"] += 1
                stats["messages_imported"] += conv_stats["messages"]
                stats["user_messages"] += conv_stats["user_messages"]
                stats["assistant_messages"] += conv_stats["assistant_messages"]
                stats["skipped_messages"] += conv_stats["skipped"]

            except Exception as e:
                logger.warning(f"Failed to import conversation '{conversation_data.get('title', 'Unknown')}': {e}")
                continue

        logger.info(f"Import completed: {stats}")
        return stats

    def _import_single_conversation(self, conversation_data: Dict, user_id: str) -> Dict[str, int]:
        """Import a single conversation from ChatGPT"""
        title = conversation_data.get("title", "Untitled")
        create_time = conversation_data.get("create_time")
        mapping = conversation_data.get("mapping", {})

        stats = {"messages": 0, "user_messages": 0, "assistant_messages": 0, "skipped": 0}

        # Extract messages in chronological order
        messages = self._extract_messages_from_mapping(mapping)

        for msg in messages:
            try:
                role = msg["author"]["role"]
                content = self._extract_content(msg["content"])
                create_time_msg = msg.get("create_time")

                if not content or role not in ["user", "assistant"]:
                    stats["skipped"] += 1
                    continue

                # Convert to ai.gpt format
                if role == "user":
                    # User message - create memory entry
                    self._add_user_message(user_id, content, create_time_msg, title)
                    stats["user_messages"] += 1

                elif role == "assistant":
                    # Assistant message - create AI response memory
                    self._add_assistant_message(user_id, content, create_time_msg, title)
                    stats["assistant_messages"] += 1

                stats["messages"] += 1

            except Exception as e:
                logger.warning(f"Failed to process message in '{title}': {e}")
                stats["skipped"] += 1
                continue

        logger.info(f"Imported conversation '{title}': {stats}")
        return stats

    def _extract_messages_from_mapping(self, mapping: Dict) -> List[Dict]:
        """Extract messages from ChatGPT mapping structure in chronological order"""
        messages = []

        for node_id, node_data in mapping.items():
            message = node_data.get("message")
            if message and message.get("author", {}).get("role") in ["user", "assistant"]:
                # Skip system messages and hidden messages
                metadata = message.get("metadata", {})
                if not metadata.get("is_visually_hidden_from_conversation", False):
                    messages.append(message)

        # Sort by create_time if available
        messages.sort(key=lambda x: x.get("create_time") or 0)
        return messages

    def _extract_content(self, content_data: Dict) -> Optional[str]:
        """Extract text content from ChatGPT content structure"""
        if not content_data:
            return None

        content_type = content_data.get("content_type")

        if content_type == "text":
            parts = content_data.get("parts", [])
            if parts and parts[0]:
                return parts[0].strip()

        elif content_type == "user_editable_context":
            # User context/instructions
            user_instructions = content_data.get("user_instructions", "")
            if user_instructions:
                return f"[User Context] {user_instructions}"

        return None

    def _add_user_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
        """Add user message to ai.gpt memory system"""
        timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()

        # Create conversation record
        conversation = Conversation(
            id=str(uuid.uuid4()),
            user_id=user_id,
            user_message=content,
            ai_response="",  # Will be filled by next assistant message
            timestamp=timestamp,
            context={"source": "chatgpt_import", "conversation_title": conversation_title}
        )

        # Add to memory with CORE level (imported data is important)
        memory = Memory(
            id=str(uuid.uuid4()),
            timestamp=timestamp,
            content=content,
            level=MemoryLevel.CORE,
            importance_score=0.8  # High importance for imported data
        )

        self.memory_manager.add_memory(memory)

        # Update relationship (positive interaction)
        self.relationship_tracker.update_interaction(user_id, 1.0)

    def _add_assistant_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
        """Add assistant message to ai.gpt memory system"""
        timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()

        # Add assistant response as memory (AI's own responses can inform future behavior)
        memory = Memory(
            id=str(uuid.uuid4()),
            timestamp=timestamp,
            content=f"[AI Response] {content}",
            level=MemoryLevel.SUMMARY,
            importance_score=0.6  # Medium importance for AI responses
        )

        self.memory_manager.add_memory(memory)
921 src/aigpt/cli.py
@@ -1,921 +0,0 @@
"""CLI interface for ai.gpt using typer"""

import typer
from pathlib import Path
from typing import Optional
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from datetime import datetime, timedelta
import subprocess
import shlex
import os
from prompt_toolkit import prompt as ptk_prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory

from .persona import Persona
from .transmission import TransmissionController
from .mcp_server import AIGptMcpServer
from .ai_provider import create_ai_provider
from .scheduler import AIScheduler, TaskType
from .config import Config
from .project_manager import ContinuousDeveloper

app = typer.Typer(help="ai.gpt - Autonomous transmission AI with unique personality")
console = Console()

# Configuration
config = Config()
DEFAULT_DATA_DIR = config.data_dir


def get_persona(data_dir: Optional[Path] = None) -> Persona:
    """Get or create persona instance"""
    if data_dir is None:
        data_dir = DEFAULT_DATA_DIR

    data_dir.mkdir(parents=True, exist_ok=True)
    return Persona(data_dir)


@app.command()
def chat(
    user_id: str = typer.Argument(..., help="User ID (atproto DID)"),
    message: str = typer.Argument(..., help="Message to send to AI"),
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
    model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"),
    provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)")
):
    """Chat with the AI"""
    persona = get_persona(data_dir)

    # Create AI provider if specified
    ai_provider = None
    if provider and model:
        try:
            ai_provider = create_ai_provider(provider=provider, model=model)
            console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
        except Exception as e:
            console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
            console.print("[yellow]Falling back to simple responses[/yellow]\n")

    # Process interaction
    response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)

    # Get updated relationship
    relationship = persona.relationships.get_or_create_relationship(user_id)

    # Display response
    console.print(Panel(response, title="AI Response", border_style="cyan"))

    # Show relationship status
    status_color = "green" if relationship.transmission_enabled else "yellow"
    if relationship.is_broken:
        status_color = "red"

    console.print(f"\n[{status_color}]Relationship Status:[/{status_color}] {relationship.status.value}")
    console.print(f"Score: {relationship.score:.2f} / {relationship.threshold}")
    console.print(f"Transmission: {'✓ Enabled' if relationship.transmission_enabled else '✗ Disabled'}")

    if relationship.is_broken:
        console.print("[red]⚠️ This relationship is broken and cannot be repaired.[/red]")


@app.command()
def status(
    user_id: Optional[str] = typer.Argument(None, help="User ID to check status for"),
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
    """Check AI status and relationships"""
    persona = get_persona(data_dir)
    state = persona.get_current_state()

    # Show AI state
    console.print(Panel(f"[cyan]ai.gpt Status[/cyan]", expand=False))
    console.print(f"Mood: {state.current_mood}")
    console.print(f"Fortune: {state.fortune.fortune_value}/10")

    if state.fortune.breakthrough_triggered:
        console.print("[yellow]⚡ Breakthrough triggered![/yellow]")

    # Show personality traits
    table = Table(title="Current Personality")
    table.add_column("Trait", style="cyan")
    table.add_column("Value", style="magenta")

    for trait, value in state.base_personality.items():
        table.add_row(trait.capitalize(), f"{value:.2f}")

    console.print(table)

    # Show specific relationship if requested
    if user_id:
        rel = persona.relationships.get_or_create_relationship(user_id)
        console.print(f"\n[cyan]Relationship with {user_id}:[/cyan]")
        console.print(f"Status: {rel.status.value}")
        console.print(f"Score: {rel.score:.2f}")
        console.print(f"Total Interactions: {rel.total_interactions}")
        console.print(f"Transmission Enabled: {rel.transmission_enabled}")


@app.command()
def fortune(
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
    """Check today's AI fortune"""
    persona = get_persona(data_dir)
    fortune = persona.fortune_system.get_today_fortune()

    # Fortune display
    fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value)

    console.print(Panel(
        f"{fortune_bar}\n\n"
        f"Today's Fortune: {fortune.fortune_value}/10\n"
        f"Date: {fortune.date}",
        title="AI Fortune",
        border_style="yellow"
    ))

    if fortune.consecutive_good > 0:
        console.print(f"[green]Consecutive good days: {fortune.consecutive_good}[/green]")
    if fortune.consecutive_bad > 0:
        console.print(f"[red]Consecutive bad days: {fortune.consecutive_bad}[/red]")

    if fortune.breakthrough_triggered:
        console.print("\n[yellow]⚡ BREAKTHROUGH! Special fortune activated![/yellow]")


@app.command()
def transmit(
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
    dry_run: bool = typer.Option(True, "--dry-run/--execute", help="Dry run or execute")
):
    """Check and execute autonomous transmissions"""
    persona = get_persona(data_dir)
    controller = TransmissionController(persona, persona.data_dir)

    eligible = controller.check_transmission_eligibility()

    if not eligible:
        console.print("[yellow]No users eligible for transmission.[/yellow]")
        return

    console.print(f"[green]Found {len(eligible)} eligible users for transmission:[/green]")

    for user_id, rel in eligible.items():
        message = controller.generate_transmission_message(user_id)
        if message:
            console.print(f"\n[cyan]To:[/cyan] {user_id}")
            console.print(f"[cyan]Message:[/cyan] {message}")
            console.print(f"[cyan]Relationship:[/cyan] {rel.status.value} (score: {rel.score:.2f})")

            if not dry_run:
                # In real implementation, send via atproto or other channel
                controller.record_transmission(user_id, message, success=True)
                console.print("[green]✓ Transmitted[/green]")
            else:
                console.print("[yellow]→ Would transmit (dry run)[/yellow]")


@app.command()
def maintenance(
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
    """Run daily maintenance tasks"""
    persona = get_persona(data_dir)

    console.print("[cyan]Running daily maintenance...[/cyan]")
    persona.daily_maintenance()
    console.print("[green]✓ Maintenance completed[/green]")


@app.command()
def relationships(
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
    """List all relationships"""
    persona = get_persona(data_dir)

    table = Table(title="All Relationships")
    table.add_column("User ID", style="cyan")
    table.add_column("Status", style="magenta")
    table.add_column("Score", style="green")
    table.add_column("Transmission", style="yellow")
    table.add_column("Last Interaction")

    for user_id, rel in persona.relationships.relationships.items():
        transmission = "✓" if rel.transmission_enabled else "✗"
        if rel.is_broken:
            transmission = "💔"

        last_interaction = rel.last_interaction.strftime("%Y-%m-%d") if rel.last_interaction else "Never"

        table.add_row(
            user_id[:16] + "...",
            rel.status.value,
            f"{rel.score:.2f}",
            transmission,
            last_interaction
        )

    console.print(table)


@app.command()
def server(
    host: str = typer.Option("localhost", "--host", "-h", help="Server host"),
    port: int = typer.Option(8000, "--port", "-p", help="Server port"),
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
    model: str = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
    provider: str = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)")
):
    """Run MCP server for AI integration"""
    import uvicorn

    if data_dir is None:
        data_dir = DEFAULT_DATA_DIR

    data_dir.mkdir(parents=True, exist_ok=True)

    # Create MCP server
    mcp_server = AIGptMcpServer(data_dir)
    app_instance = mcp_server.app

    console.print(Panel(
        f"[cyan]Starting ai.gpt MCP Server[/cyan]\n\n"
        f"Host: {host}:{port}\n"
        f"Provider: {provider}\n"
        f"Model: {model}\n"
        f"Data: {data_dir}",
        title="MCP Server",
        border_style="green"
    ))

    # Store provider info in app state for later use
    app_instance.state.ai_provider = provider
    app_instance.state.ai_model = model

    # Run server
    uvicorn.run(app_instance, host=host, port=port)


@app.command()
def schedule(
    action: str = typer.Argument(..., help="Action: add, list, enable, disable, remove, run"),
    task_type: Optional[str] = typer.Argument(None, help="Task type for add action"),
    schedule_expr: Optional[str] = typer.Argument(None, help="Schedule expression (cron or interval)"),
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
    task_id: Optional[str] = typer.Option(None, "--task-id", "-t", help="Task ID"),
    provider: Optional[str] = typer.Option(None, "--provider", help="AI provider for transmission"),
    model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model for transmission")
):
    """Manage scheduled tasks"""
    persona = get_persona(data_dir)
    scheduler = AIScheduler(persona.data_dir, persona)

    if action == "add":
        if not task_type or not schedule_expr:
            console.print("[red]Error: task_type and schedule required for add action[/red]")
            return

        # Parse task type
        try:
            task_type_enum = TaskType(task_type)
        except ValueError:
            console.print(f"[red]Invalid task type. Valid types: {', '.join([t.value for t in TaskType])}[/red]")
            return

        # Metadata for transmission tasks
        metadata = {}
        if task_type_enum == TaskType.TRANSMISSION_CHECK:
            metadata["provider"] = provider or "ollama"
            metadata["model"] = model or "qwen2.5"

        try:
            task = scheduler.add_task(task_type_enum, schedule_expr, task_id, metadata)
            console.print(f"[green]✓ Added task {task.task_id}[/green]")
            console.print(f"Type: {task.task_type.value}")
            console.print(f"Schedule: {task.schedule}")
        except ValueError as e:
            console.print(f"[red]Error: {e}[/red]")

    elif action == "list":
        tasks = scheduler.get_tasks()
        if not tasks:
            console.print("[yellow]No scheduled tasks[/yellow]")
            return

        table = Table(title="Scheduled Tasks")
        table.add_column("Task ID", style="cyan")
        table.add_column("Type", style="magenta")
        table.add_column("Schedule", style="green")
        table.add_column("Enabled", style="yellow")
        table.add_column("Last Run")

        for task in tasks:
            enabled = "✓" if task.enabled else "✗"
            last_run = task.last_run.strftime("%Y-%m-%d %H:%M") if task.last_run else "Never"

            table.add_row(
                task.task_id[:20] + "..." if len(task.task_id) > 20 else task.task_id,
                task.task_type.value,
                task.schedule,
                enabled,
                last_run
            )

        console.print(table)

    elif action == "enable":
        if not task_id:
            console.print("[red]Error: --task-id required for enable action[/red]")
            return

        scheduler.enable_task(task_id)
        console.print(f"[green]✓ Enabled task {task_id}[/green]")

    elif action == "disable":
        if not task_id:
            console.print("[red]Error: --task-id required for disable action[/red]")
            return

        scheduler.disable_task(task_id)
        console.print(f"[yellow]✓ Disabled task {task_id}[/yellow]")

    elif action == "remove":
        if not task_id:
            console.print("[red]Error: --task-id required for remove action[/red]")
            return

        scheduler.remove_task(task_id)
        console.print(f"[red]✓ Removed task {task_id}[/red]")

    elif action == "run":
        console.print("[cyan]Starting scheduler daemon...[/cyan]")
        console.print("Press Ctrl+C to stop\n")

        import asyncio

        async def run_scheduler():
            scheduler.start()
            try:
                while True:
                    await asyncio.sleep(1)
            except KeyboardInterrupt:
                scheduler.stop()

        try:
            asyncio.run(run_scheduler())
        except KeyboardInterrupt:
            console.print("\n[yellow]Scheduler stopped[/yellow]")

    else:
        console.print(f"[red]Unknown action: {action}[/red]")
        console.print("Valid actions: add, list, enable, disable, remove, run")


@app.command()
def shell(
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
    model: Optional[str] = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
    provider: Optional[str] = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)")
):
    """Interactive shell mode (ai.shell)"""
    persona = get_persona(data_dir)

    # Create AI provider
    ai_provider = None
    if provider and model:
        try:
            ai_provider = create_ai_provider(provider=provider, model=model)
            console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
        except Exception as e:
            console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
            console.print("[yellow]Falling back to simple responses[/yellow]\n")

    # Welcome message
    console.print(Panel(
        "[cyan]Welcome to ai.shell[/cyan]\n\n"
        "Interactive AI-powered shell with command execution\n\n"
        "Commands:\n"
        "  help           - Show available commands\n"
        "  exit/quit      - Exit shell\n"
        "  !<command>     - Execute shell command\n"
        "  chat <message> - Chat with AI\n"
        "  status         - Show AI status\n"
        "  clear          - Clear screen\n\n"
        "Type any message to interact with AI",
        title="ai.shell",
        border_style="green"
    ))

    # Command completer with shell commands
    builtin_commands = ['help', 'exit', 'quit', 'chat', 'status', 'clear', 'fortune', 'relationships', 'load']

    # Add common shell commands
    shell_commands = ['ls', 'cd', 'pwd', 'cat', 'echo', 'grep', 'find', 'mkdir', 'rm', 'cp', 'mv',
                      'git', 'python', 'pip', 'npm', 'node', 'cargo', 'rustc', 'docker', 'kubectl']

    # AI-specific commands
    ai_commands = ['analyze', 'generate', 'explain', 'optimize', 'refactor', 'test', 'document']

    # Remote execution commands (ai.bot integration)
    remote_commands = ['remote', 'isolated', 'aibot-status']

    # Project management commands (Claude Code-like)
    project_commands = ['project-status', 'suggest-next', 'continuous']

    all_commands = builtin_commands + ['!' + cmd for cmd in shell_commands] + ai_commands + remote_commands + project_commands
    completer = WordCompleter(all_commands, ignore_case=True)

    # History file
    actual_data_dir = data_dir if data_dir else DEFAULT_DATA_DIR
    history_file = actual_data_dir / "shell_history.txt"
    history = FileHistory(str(history_file))

    # Main shell loop
    current_user = "shell_user"  # Default user for shell sessions

    while True:
        try:
            # Get input with completion
            user_input = ptk_prompt(
                "ai.shell> ",
                completer=completer,
                history=history,
                auto_suggest=AutoSuggestFromHistory()
            ).strip()

            if not user_input:
                continue

            # Exit commands
            if user_input.lower() in ['exit', 'quit']:
                console.print("[cyan]Goodbye![/cyan]")
                break

            # Help command
            elif user_input.lower() == 'help':
                console.print(Panel(
                    "[cyan]ai.shell Commands:[/cyan]\n\n"
                    "  help           - Show this help message\n"
                    "  exit/quit      - Exit the shell\n"
                    "  !<command>     - Execute a shell command\n"
                    "  chat <message> - Explicitly chat with AI\n"
                    "  status         - Show AI status\n"
                    "  fortune        - Check AI fortune\n"
                    "  relationships  - List all relationships\n"
                    "  clear          - Clear the screen\n"
                    "  load           - Load aishell.md project file\n\n"
                    "[cyan]AI Commands:[/cyan]\n"
                    "  analyze <file>  - Analyze a file with AI\n"
                    "  generate <desc> - Generate code from description\n"
                    "  explain <topic> - Get AI explanation\n\n"
                    "[cyan]Remote Commands (ai.bot):[/cyan]\n"
                    "  remote <command> - Execute command in isolated container\n"
                    "  isolated <code>  - Run Python code in isolated environment\n"
                    "  aibot-status     - Check ai.bot server status\n\n"
                    "[cyan]Project Commands (Claude Code-like):[/cyan]\n"
                    "  project-status - Analyze current project structure\n"
                    "  suggest-next   - AI suggests next development steps\n"
                    "  continuous     - Enable continuous development mode\n\n"
                    "You can also type any message to chat with AI\n"
                    "Use Tab for command completion",
                    title="Help",
                    border_style="yellow"
                ))

            # Clear command
            elif user_input.lower() == 'clear':
                console.clear()

            # Shell command execution
            elif user_input.startswith('!'):
                cmd = user_input[1:].strip()
                if cmd:
                    try:
                        # Execute command
                        result = subprocess.run(
                            shlex.split(cmd),
                            capture_output=True,
                            text=True,
                            shell=False
                        )

                        if result.stdout:
                            console.print(result.stdout.rstrip())
                        if result.stderr:
                            console.print(f"[red]{result.stderr.rstrip()}[/red]")

                        if result.returncode != 0:
                            console.print(f"[red]Command exited with code {result.returncode}[/red]")
                    except FileNotFoundError:
                        console.print(f"[red]Command not found: {cmd.split()[0]}[/red]")
                    except Exception as e:
                        console.print(f"[red]Error executing command: {e}[/red]")

            # Status command
            elif user_input.lower() == 'status':
                state = persona.get_current_state()
                console.print(f"\nMood: {state.current_mood}")
                console.print(f"Fortune: {state.fortune.fortune_value}/10")

                rel = persona.relationships.get_or_create_relationship(current_user)
                console.print(f"\nRelationship Status: {rel.status.value}")
                console.print(f"Score: {rel.score:.2f} / {rel.threshold}")

            # Fortune command
            elif user_input.lower() == 'fortune':
                fortune = persona.fortune_system.get_today_fortune()
                fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value)
                console.print(f"\n{fortune_bar}")
                console.print(f"Today's Fortune: {fortune.fortune_value}/10")

            # Relationships command
            elif user_input.lower() == 'relationships':
                if persona.relationships.relationships:
                    console.print("\n[cyan]Relationships:[/cyan]")
                    for user_id, rel in persona.relationships.relationships.items():
                        console.print(f"  {user_id[:16]}... - {rel.status.value} ({rel.score:.2f})")
                else:
                    console.print("[yellow]No relationships yet[/yellow]")

            # Load aishell.md command
            elif user_input.lower() in ['load', 'load aishell.md', 'project']:
                # Try to find and load aishell.md
                search_paths = [
                    Path.cwd() / "aishell.md",
                    Path.cwd() / "docs" / "aishell.md",
                    actual_data_dir.parent / "aishell.md",
                    Path.cwd() / "claude.md",  # Also check for claude.md
                ]

                loaded = False
                for path in search_paths:
                    if path.exists():
                        console.print(f"[cyan]Loading project file: {path}[/cyan]")
                        with open(path, 'r', encoding='utf-8') as f:
                            content = f.read()

                        # Process with AI to understand project
                        load_prompt = f"I've loaded the project specification. Please analyze it and understand the project goals:\n\n{content[:3000]}"
                        response, _ = persona.process_interaction(current_user, load_prompt, ai_provider)
                        console.print(f"\n[green]Project loaded successfully![/green]")
                        console.print(f"[cyan]AI Understanding:[/cyan]\n{response}")
                        loaded = True
                        break

                if not loaded:
                    console.print("[yellow]No aishell.md or claude.md found in project.[/yellow]")
                    console.print("Create aishell.md to define project goals and AI instructions.")

            # AI-powered commands
            elif user_input.lower().startswith('analyze '):
                # Analyze file or code with project context
                target = user_input[8:].strip()
                if os.path.exists(target):
                    console.print(f"[cyan]Analyzing {target} with project context...[/cyan]")
                    try:
                        developer = ContinuousDeveloper(Path.cwd(), ai_provider)
                        analysis = developer.analyze_file(target)
                        console.print(f"\n[cyan]Analysis:[/cyan]\n{analysis}")
                    except Exception as e:
                        # Fallback to simple analysis
                        with open(target, 'r') as f:
                            content = f.read()
                        analysis_prompt = f"Analyze this file and provide insights:\n\n{content[:2000]}"
                        response, _ = persona.process_interaction(current_user, analysis_prompt, ai_provider)
                        console.print(f"\n[cyan]Analysis:[/cyan]\n{response}")
                else:
                    console.print(f"[red]File not found: {target}[/red]")

            elif user_input.lower().startswith('generate '):
                # Generate code with project context
                gen_prompt = user_input[9:].strip()
                if gen_prompt:
                    console.print("[cyan]Generating code with project context...[/cyan]")
                    try:
                        developer = ContinuousDeveloper(Path.cwd(), ai_provider)
                        generated_code = developer.generate_code(gen_prompt)
                        console.print(f"\n[cyan]Generated Code:[/cyan]\n{generated_code}")
                    except Exception as e:
                        # Fallback to simple generation
                        full_prompt = f"Generate code for: {gen_prompt}. Provide clean, well-commented code."
                        response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
                        console.print(f"\n[cyan]Generated Code:[/cyan]\n{response}")

            elif user_input.lower().startswith('explain '):
                # Explain code or concept
                topic = user_input[8:].strip()
                if topic:
                    console.print(f"[cyan]Explaining {topic}...[/cyan]")
                    full_prompt = f"Explain this in detail: {topic}"
                    response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
                    console.print(f"\n[cyan]Explanation:[/cyan]\n{response}")

            # Remote execution commands (ai.bot integration)
            elif user_input.lower().startswith('remote '):
                # Execute command in ai.bot isolated container
                command = user_input[7:].strip()
                if command:
                    console.print(f"[cyan]Executing remotely:[/cyan] {command}")
                    try:
                        import httpx
                        import asyncio

                        async def execute_remote():
                            async with httpx.AsyncClient(timeout=30.0) as client:
                                response = await client.post(
                                    "http://localhost:8080/sh",
                                    json={"command": command},
                                    headers={"Content-Type": "application/json"}
                                )
                                return response

                        response = asyncio.run(execute_remote())

                        if response.status_code == 200:
                            result = response.json()
                            console.print(f"[green]Output:[/green]\n{result.get('output', '')}")
                            if result.get('error'):
                                console.print(f"[red]Error:[/red] {result.get('error')}")
                            console.print(f"[dim]Exit code: {result.get('exit_code', 0)} | Execution time: {result.get('execution_time', 'N/A')}[/dim]")
                        else:
                            console.print(f"[red]ai.bot error: HTTP {response.status_code}[/red]")
                    except Exception as e:
                        console.print(f"[red]Failed to connect to ai.bot: {e}[/red]")

            elif user_input.lower().startswith('isolated '):
                # Execute Python code in isolated environment
                code = user_input[9:].strip()
                if code:
                    console.print(f"[cyan]Running Python code in isolated container...[/cyan]")
                    try:
                        import httpx
                        import asyncio

                        async def execute_python():
                            # Escape double quotes before embedding (nesting the same
                            # quote type inside an f-string is a syntax error before
                            # Python 3.12)
                            escaped_code = code.replace('"', '\\"')
                            python_command = f'python3 -c "{escaped_code}"'
                            async with httpx.AsyncClient(timeout=30.0) as client:
                                response = await client.post(
                                    "http://localhost:8080/sh",
                                    json={"command": python_command},
                                    headers={"Content-Type": "application/json"}
                                )
                                return response

                        response = asyncio.run(execute_python())

                        if response.status_code == 200:
                            result = response.json()
                            console.print(f"[green]Python Output:[/green]\n{result.get('output', '')}")
                            if result.get('error'):
                                console.print(f"[red]Error:[/red] {result.get('error')}")
                        else:
                            console.print(f"[red]ai.bot error: HTTP {response.status_code}[/red]")
                    except Exception as e:
                        console.print(f"[red]Failed to execute Python code: {e}[/red]")

            elif user_input.lower() == 'aibot-status':
                # Check ai.bot server status
                console.print("[cyan]Checking ai.bot server status...[/cyan]")
                try:
                    import httpx
                    import asyncio

                    async def check_status():
                        async with httpx.AsyncClient(timeout=10.0) as client:
                            response = await client.get("http://localhost:8080/status")
                            return response

                    response = asyncio.run(check_status())

                    if response.status_code == 200:
                        result = response.json()
                        console.print(f"[green]ai.bot is online![/green]")
                        console.print(f"Server info: {result}")
                    else:
                        console.print(f"[yellow]ai.bot responded with status {response.status_code}[/yellow]")
                except Exception as e:
                    console.print(f"[red]ai.bot is offline: {e}[/red]")
                    console.print("[dim]Make sure ai.bot is running on localhost:8080[/dim]")

            # Project management commands (Claude Code-like)
            elif user_input.lower() == 'project-status':
                # Analyze project structure
                console.print("[cyan]Analyzing project structure...[/cyan]")
                try:
                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
                    analysis = developer.analyze_project_structure()
                    changes = developer.project_state.detect_changes()

                    console.print(f"[green]Project Analysis:[/green]")
                    console.print(f"Language: {analysis['language']}")
                    console.print(f"Framework: {analysis['framework']}")
                    console.print(f"Structure: {analysis['structure']}")
                    console.print(f"Dependencies: {analysis['dependencies']}")
                    console.print(f"Code Patterns: {analysis['patterns']}")

                    if changes:
                        console.print(f"\n[yellow]Recent Changes:[/yellow]")
                        for file_path, change_type in changes.items():
                            console.print(f"  {change_type}: {file_path}")
                    else:
                        console.print(f"\n[dim]No recent changes detected[/dim]")

                except Exception as e:
                    console.print(f"[red]Error analyzing project: {e}[/red]")

            elif user_input.lower() == 'suggest-next':
                # Suggest next development steps
                console.print("[cyan]AI is analyzing project and suggesting next steps...[/cyan]")
                try:
                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
                    suggestions = developer.suggest_next_steps()

                    console.print(f"[green]Suggested Next Steps:[/green]")
                    for i, suggestion in enumerate(suggestions, 1):
                        console.print(f"  {i}. {suggestion}")

                except Exception as e:
                    console.print(f"[red]Error generating suggestions: {e}[/red]")

            elif user_input.lower().startswith('continuous'):
                # Continuous development mode
                console.print("[cyan]Enabling continuous development mode...[/cyan]")
                console.print("[yellow]Continuous mode is experimental. Type 'exit-continuous' to exit.[/yellow]")

                try:
                    developer = ContinuousDeveloper(Path.cwd(), ai_provider)
                    context = developer.load_project_context()

                    console.print(f"[green]Project context loaded:[/green]")
                    console.print(f"Context: {len(context)} characters")

                    # Add to session memory for continuous context
                    persona.process_interaction(current_user, f"Continuous development mode started for project: {context[:500]}", ai_provider)
                    console.print("[dim]Project context added to AI memory for continuous development.[/dim]")

                except Exception as e:
                    console.print(f"[red]Error starting continuous mode: {e}[/red]")

            # Chat command or direct message
            else:
                # Remove 'chat' prefix if present
                if user_input.lower().startswith('chat '):
                    message = user_input[5:].strip()
                else:
                    message = user_input

                if message:
                    # Process interaction with AI
                    response, relationship_delta = persona.process_interaction(
                        current_user, message, ai_provider
                    )

                    # Display response
                    console.print(f"\n[cyan]AI:[/cyan] {response}")

                    # Show relationship change if significant
                    if abs(relationship_delta) >= 0.1:
                        if relationship_delta > 0:
                            console.print(f"[green](+{relationship_delta:.2f} relationship)[/green]")
                        else:
                            console.print(f"[red]({relationship_delta:.2f} relationship)[/red]")

        except KeyboardInterrupt:
            console.print("\n[yellow]Use 'exit' or 'quit' to leave the shell[/yellow]")
        except EOFError:
            console.print("\n[cyan]Goodbye![/cyan]")
            break
        except Exception as e:
            console.print(f"[red]Error: {e}[/red]")


@app.command()
def config(
    action: str = typer.Argument(..., help="Action: get, set, delete, list"),
    key: Optional[str] = typer.Argument(None, help="Configuration key (dot notation)"),
    value: Optional[str] = typer.Argument(None, help="Value to set")
):
    """Manage configuration settings"""

    if action == "get":
        if not key:
            console.print("[red]Error: key required for get action[/red]")
            return

        val = config.get(key)
        if val is None:
            console.print(f"[yellow]Key '{key}' not found[/yellow]")
        else:
            console.print(f"[cyan]{key}[/cyan] = [green]{val}[/green]")

    elif action == "set":
        if not key or value is None:
            console.print("[red]Error: key and value required for set action[/red]")
            return

        # Special handling for sensitive keys
        if "password" in key or "api_key" in key:
            console.print(f"[cyan]Setting {key}[/cyan] = [dim]***hidden***[/dim]")
        else:
            console.print(f"[cyan]Setting {key}[/cyan] = [green]{value}[/green]")

        config.set(key, value)
        console.print("[green]✓ Configuration saved[/green]")

    elif action == "delete":
        if not key:
            console.print("[red]Error: key required for delete action[/red]")
            return

        if config.delete(key):
            console.print(f"[green]✓ Deleted {key}[/green]")
        else:
            console.print(f"[yellow]Key '{key}' not found[/yellow]")

    elif action == "list":
        config_instance = Config()
        keys = config_instance.list_keys(key or "")

        if not keys:
            console.print("[yellow]No configuration keys found[/yellow]")
            return

        table = Table(title="Configuration Settings")
        table.add_column("Key", style="cyan")
        table.add_column("Value", style="green")

        for k in sorted(keys):
            val = config_instance.get(k)
            # Hide sensitive values
            if "password" in k or "api_key" in k:
                display_val = "***hidden***" if val else "not set"
            else:
                display_val = str(val) if val is not None else "not set"

            table.add_row(k, display_val)

        console.print(table)

    else:
        console.print(f"[red]Unknown action: {action}[/red]")
        console.print("Valid actions: get, set, delete, list")


@app.command()
def import_chatgpt(
    file_path: Path = typer.Argument(..., help="Path to ChatGPT export JSON file"),
    user_id: str = typer.Option("chatgpt_user", "--user-id", "-u", help="User ID for imported conversations"),
    data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
    """Import ChatGPT conversation data into ai.gpt memory system"""
    from .chatgpt_importer import ChatGPTImporter

    if data_dir is None:
        data_dir = DEFAULT_DATA_DIR

    data_dir.mkdir(parents=True, exist_ok=True)

    if not file_path.exists():
        console.print(f"[red]Error: File not found: {file_path}[/red]")
        raise typer.Exit(1)

    console.print(f"[cyan]Importing ChatGPT data from {file_path}[/cyan]")
    console.print(f"User ID: {user_id}")
    console.print(f"Data directory: {data_dir}")

    try:
        importer = ChatGPTImporter(data_dir)
        stats = importer.import_from_file(file_path, user_id)

        # Display results
        table = Table(title="Import Results")
        table.add_column("Metric", style="cyan")
        table.add_column("Count", style="green")

        table.add_row("Conversations imported", str(stats["conversations_imported"]))
        table.add_row("Total messages", str(stats["messages_imported"]))
        table.add_row("User messages", str(stats["user_messages"]))
        table.add_row("Assistant messages", str(stats["assistant_messages"]))
        table.add_row("Skipped messages", str(stats["skipped_messages"]))

        console.print(table)
        console.print(f"[green]✓ Import completed successfully![/green]")

        # Show next steps
        console.print("\n[cyan]Next steps:[/cyan]")
        console.print(f"- Check memories: [yellow]aigpt status[/yellow]")
        console.print(f"- Chat with AI: [yellow]aigpt chat {user_id} \"hello\"[/yellow]")
        console.print(f"- View relationships: [yellow]aigpt relationships[/yellow]")

    except Exception as e:
        console.print(f"[red]Error during import: {e}[/red]")
        raise typer.Exit(1)


if __name__ == "__main__":
    app()
@@ -1,145 +0,0 @@
"""Configuration management for ai.gpt"""

import json
import os
from pathlib import Path
from typing import Optional, Dict, Any
import logging


class Config:
    """Manages configuration settings"""

    def __init__(self, config_dir: Optional[Path] = None):
        if config_dir is None:
            config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt"

        self.config_dir = config_dir
        self.config_file = config_dir / "config.json"
        self.data_dir = config_dir / "data"

        # Create directories if they don't exist
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.data_dir.mkdir(parents=True, exist_ok=True)

        self.logger = logging.getLogger(__name__)
        self._config: Dict[str, Any] = {}
        self._load_config()

    def _load_config(self):
        """Load configuration from file"""
        if self.config_file.exists():
            try:
                with open(self.config_file, 'r', encoding='utf-8') as f:
                    self._config = json.load(f)
            except Exception as e:
                self.logger.error(f"Failed to load config: {e}")
                self._config = {}
        else:
            # Initialize with default config
            self._config = {
                "providers": {
                    "openai": {
                        "api_key": None,
                        "default_model": "gpt-4o-mini"
                    },
                    "ollama": {
                        "host": "http://localhost:11434",
                        "default_model": "qwen2.5"
                    }
                },
                "atproto": {
                    "handle": None,
                    "password": None,
                    "host": "https://bsky.social"
                },
                "default_provider": "ollama"
            }
            self._save_config()

    def _save_config(self):
        """Save configuration to file"""
        try:
            with open(self.config_file, 'w', encoding='utf-8') as f:
                json.dump(self._config, f, indent=2)
        except Exception as e:
            self.logger.error(f"Failed to save config: {e}")

    def get(self, key: str, default: Any = None) -> Any:
        """Get configuration value using dot notation"""
        keys = key.split('.')
        value = self._config

        for k in keys:
            if isinstance(value, dict) and k in value:
                value = value[k]
            else:
                return default

        return value

    def set(self, key: str, value: Any):
        """Set configuration value using dot notation"""
        keys = key.split('.')
        config = self._config

        # Navigate to the parent dictionary
        for k in keys[:-1]:
            if k not in config:
                config[k] = {}
            config = config[k]

        # Set the value
        config[keys[-1]] = value
        self._save_config()

    def delete(self, key: str) -> bool:
        """Delete configuration value"""
        keys = key.split('.')
        config = self._config

        # Navigate to the parent dictionary
        for k in keys[:-1]:
            if k not in config:
                return False
            config = config[k]

        # Delete the key if it exists
        if keys[-1] in config:
            del config[keys[-1]]
            self._save_config()
            return True

        return False

    def list_keys(self, prefix: str = "") -> list[str]:
        """List all configuration keys with optional prefix"""
        def _get_keys(config: dict, current_prefix: str = "") -> list[str]:
            keys = []
            for k, v in config.items():
                full_key = f"{current_prefix}.{k}" if current_prefix else k
                if isinstance(v, dict):
                    keys.extend(_get_keys(v, full_key))
                else:
                    keys.append(full_key)
            return keys

        all_keys = _get_keys(self._config)

        if prefix:
            return [k for k in all_keys if k.startswith(prefix)]
        return all_keys

    def get_api_key(self, provider: str) -> Optional[str]:
        """Get API key for a specific provider"""
        key = self.get(f"providers.{provider}.api_key")

        # Also check environment variables
        if not key and provider == "openai":
            key = os.getenv("OPENAI_API_KEY")

        return key

    def get_provider_config(self, provider: str) -> Dict[str, Any]:
        """Get complete configuration for a provider"""
        return self.get(f"providers.{provider}", {})
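# A minimal usage sketch for the Config class above (the key values are
# illustrative). Dot notation walks the nested dict one segment at a time:
#
#   cfg = Config()
#   cfg.set("providers.openai.api_key", "sk-...")  # creates nested dicts as needed
#   cfg.get("providers.ollama.default_model")      # -> "qwen2.5"
#   cfg.list_keys("atproto")  # -> ["atproto.handle", "atproto.password", "atproto.host"]
#   cfg.get_api_key("openai")  # falls back to $OPENAI_API_KEY if unset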
@@ -1,118 +0,0 @@
"""AI Fortune system for daily personality variations"""

import json
import random
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Optional
import logging

from .models import AIFortune


class FortuneSystem:
    """Manages daily AI fortune affecting personality"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.fortune_file = data_dir / "fortunes.json"
        self.fortunes: dict[str, AIFortune] = {}
        self.logger = logging.getLogger(__name__)
        self._load_fortunes()

    def _load_fortunes(self):
        """Load fortune history from storage"""
        if self.fortune_file.exists():
            with open(self.fortune_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for date_str, fortune_data in data.items():
                    # Convert date string back to date object
                    fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date()
                    self.fortunes[date_str] = AIFortune(**fortune_data)

    def _save_fortunes(self):
        """Save fortune history to storage"""
        data = {}
        for date_str, fortune in self.fortunes.items():
            fortune_dict = fortune.model_dump(mode='json')
            fortune_dict['date'] = fortune.date.isoformat()
            data[date_str] = fortune_dict

        with open(self.fortune_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

    def get_today_fortune(self) -> AIFortune:
        """Get or generate today's fortune"""
        today = date.today()
        today_str = today.isoformat()

        if today_str in self.fortunes:
            return self.fortunes[today_str]

        # Generate new fortune
        fortune_value = random.randint(1, 10)

        # Check yesterday's fortune for consecutive tracking
        yesterday = (today - timedelta(days=1))
        yesterday_str = yesterday.isoformat()

        consecutive_good = 0
        consecutive_bad = 0
        breakthrough_triggered = False

        if yesterday_str in self.fortunes:
            yesterday_fortune = self.fortunes[yesterday_str]

            if fortune_value >= 7:  # Good fortune
                if yesterday_fortune.fortune_value >= 7:
                    consecutive_good = yesterday_fortune.consecutive_good + 1
                else:
                    consecutive_good = 1
            elif fortune_value <= 3:  # Bad fortune
                if yesterday_fortune.fortune_value <= 3:
                    consecutive_bad = yesterday_fortune.consecutive_bad + 1
                else:
                    consecutive_bad = 1

        # Check breakthrough conditions
        if consecutive_good >= 3:
            breakthrough_triggered = True
            self.logger.info("Breakthrough! 3 consecutive good fortunes!")
            fortune_value = 10  # Max fortune on breakthrough
        elif consecutive_bad >= 3:
            breakthrough_triggered = True
            self.logger.info("Breakthrough! 3 consecutive bad fortunes!")
            fortune_value = random.randint(7, 10)  # Good fortune after bad streak

        fortune = AIFortune(
            date=today,
            fortune_value=fortune_value,
            consecutive_good=consecutive_good,
            consecutive_bad=consecutive_bad,
            breakthrough_triggered=breakthrough_triggered
        )

        self.fortunes[today_str] = fortune
        self._save_fortunes()

        self.logger.info(f"Today's fortune: {fortune_value}/10")
        return fortune

    def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]:
        """Get personality modifiers based on fortune"""
        base_modifier = fortune.fortune_value / 10.0

        modifiers = {
            "optimism": base_modifier,
            "energy": base_modifier * 0.8,
            "patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1),
            "creativity": 0.5 + (base_modifier * 0.5),
            "empathy": 0.7 + (base_modifier * 0.3)
        }

        # Breakthrough effects
        if fortune.breakthrough_triggered:
            modifiers["confidence"] = 1.0
            modifiers["spontaneity"] = 0.9

        return modifiers
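# A worked example of the modifier arithmetic above (values follow directly
# from the formulas; this block is illustrative, not part of the original file):
#
#   fortune_value = 8  ->  base_modifier = 0.8
#   optimism   = 0.8
#   energy     = 0.8 * 0.8             = 0.64
#   patience   = 1.0 - |5.5 - 8| * 0.1 = 0.75
#   creativity = 0.5 + 0.8 * 0.5       = 0.9
#   empathy    = 0.7 + 0.8 * 0.3       = 0.94
#
# Two days at >= 7 followed by a third good roll trips the breakthrough branch,
# which pins fortune_value to 10 and adds confidence/spontaneity modifiers.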
@@ -1,511 +0,0 @@
"""MCP Server for ai.gpt system"""

from typing import Optional, List, Dict, Any
from fastapi_mcp import FastApiMCP
from fastapi import FastAPI
from pathlib import Path
import logging
import subprocess
import os
import shlex
import httpx
import json
from .ai_provider import create_ai_provider

from .persona import Persona
from .models import Memory, Relationship, PersonaState

logger = logging.getLogger(__name__)


class AIGptMcpServer:
    """MCP Server that exposes ai.gpt functionality to AI assistants"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.persona = Persona(data_dir)

        # Create FastAPI app
        self.app = FastAPI(
            title="AI.GPT Memory and Relationship System",
            description="MCP server for ai.gpt system"
        )

        # Create MCP server with FastAPI app
        self.server = FastApiMCP(self.app)

        self._register_tools()

    def _register_tools(self):
        """Register all MCP tools"""

        @self.app.get("/get_memories", operation_id="get_memories")
        async def get_memories(user_id: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
            """Get active memories from the AI's memory system"""
            memories = self.persona.memory.get_active_memories(limit=limit)
            return [
                {
                    "id": mem.id,
                    "content": mem.content,
                    "level": mem.level.value,
                    "importance": mem.importance_score,
                    "is_core": mem.is_core,
                    "timestamp": mem.timestamp.isoformat()
                }
                for mem in memories
            ]

        @self.app.get("/get_contextual_memories", operation_id="get_contextual_memories")
        async def get_contextual_memories(query: str = "", limit: int = 10) -> Dict[str, List[Dict[str, Any]]]:
            """Get memories organized by priority with contextual relevance"""
            memory_groups = self.persona.memory.get_contextual_memories(query=query, limit=limit)

            result = {}
            for group_name, memories in memory_groups.items():
                result[group_name] = [
                    {
                        "id": mem.id,
                        "content": mem.content,
                        "level": mem.level.value,
                        "importance": mem.importance_score,
                        "is_core": mem.is_core,
                        "timestamp": mem.timestamp.isoformat(),
                        "summary": mem.summary,
                        "metadata": mem.metadata
                    }
                    for mem in memories
                ]
            return result

        @self.app.post("/search_memories", operation_id="search_memories")
        async def search_memories(keywords: List[str], memory_types: Optional[List[str]] = None) -> List[Dict[str, Any]]:
            """Search memories by keywords and optionally filter by memory types"""
            from .models import MemoryLevel

            # Convert string memory types to enum if provided
            level_filter = None
            if memory_types:
                level_filter = []
                for mt in memory_types:
                    try:
                        level_filter.append(MemoryLevel(mt))
                    except ValueError:
                        pass  # Skip invalid memory types

            memories = self.persona.memory.search_memories(keywords, memory_types=level_filter)
            return [
                {
                    "id": mem.id,
                    "content": mem.content,
                    "level": mem.level.value,
                    "importance": mem.importance_score,
                    "is_core": mem.is_core,
                    "timestamp": mem.timestamp.isoformat(),
                    "summary": mem.summary,
                    "metadata": mem.metadata
                }
                for mem in memories
            ]

        @self.app.post("/create_summary", operation_id="create_summary")
        async def create_summary(user_id: str) -> Dict[str, Any]:
            """Create an AI-powered summary of recent memories"""
            try:
                ai_provider = create_ai_provider()
                summary = self.persona.memory.create_smart_summary(user_id, ai_provider=ai_provider)

                if summary:
                    return {
                        "success": True,
                        "summary": {
                            "id": summary.id,
                            "content": summary.content,
                            "level": summary.level.value,
                            "importance": summary.importance_score,
                            "timestamp": summary.timestamp.isoformat(),
                            "metadata": summary.metadata
                        }
                    }
                else:
                    return {"success": False, "reason": "Not enough memories to summarize"}
            except Exception as e:
                logger.error(f"Failed to create summary: {e}")
                return {"success": False, "reason": str(e)}

        @self.app.post("/create_core_memory", operation_id="create_core_memory")
        async def create_core_memory() -> Dict[str, Any]:
            """Create a core memory by analyzing all existing memories"""
            try:
                ai_provider = create_ai_provider()
                core_memory = self.persona.memory.create_core_memory(ai_provider=ai_provider)

                if core_memory:
                    return {
                        "success": True,
                        "core_memory": {
                            "id": core_memory.id,
                            "content": core_memory.content,
                            "level": core_memory.level.value,
                            "importance": core_memory.importance_score,
                            "timestamp": core_memory.timestamp.isoformat(),
                            "metadata": core_memory.metadata
                        }
                    }
                else:
                    return {"success": False, "reason": "Not enough memories to create core memory"}
            except Exception as e:
                logger.error(f"Failed to create core memory: {e}")
                return {"success": False, "reason": str(e)}

        @self.app.get("/get_relationship", operation_id="get_relationship")
        async def get_relationship(user_id: str) -> Dict[str, Any]:
            """Get relationship status with a specific user"""
            rel = self.persona.relationships.get_or_create_relationship(user_id)
            return {
                "user_id": rel.user_id,
                "status": rel.status.value,
                "score": rel.score,
                "transmission_enabled": rel.transmission_enabled,
                "is_broken": rel.is_broken,
                "total_interactions": rel.total_interactions,
                "last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
            }

        @self.app.get("/get_all_relationships", operation_id="get_all_relationships")
        async def get_all_relationships() -> List[Dict[str, Any]]:
            """Get all relationships"""
            relationships = []
            for user_id, rel in self.persona.relationships.relationships.items():
                relationships.append({
                    "user_id": user_id,
                    "status": rel.status.value,
                    "score": rel.score,
                    "transmission_enabled": rel.transmission_enabled,
                    "is_broken": rel.is_broken
                })
            return relationships

        @self.app.get("/get_persona_state", operation_id="get_persona_state")
        async def get_persona_state() -> Dict[str, Any]:
            """Get current persona state including fortune and mood"""
            state = self.persona.get_current_state()
            return {
                "mood": state.current_mood,
                "fortune": {
                    "value": state.fortune.fortune_value,
                    "date": state.fortune.date.isoformat(),
                    "breakthrough": state.fortune.breakthrough_triggered
                },
                "personality": state.base_personality,
                "active_memory_count": len(state.active_memories)
            }

        @self.app.post("/get_context_prompt", operation_id="get_context_prompt")
        async def get_context_prompt(user_id: str, message: str) -> Dict[str, Any]:
            """Get context-aware prompt for AI response generation"""
            try:
                context_prompt = self.persona.build_context_prompt(user_id, message)
                return {
                    "success": True,
                    "context_prompt": context_prompt,
                    "user_id": user_id,
                    "message": message
                }
            except Exception as e:
                logger.error(f"Failed to build context prompt: {e}")
                return {"success": False, "reason": str(e)}

        @self.app.post("/process_interaction", operation_id="process_interaction")
        async def process_interaction(user_id: str, message: str) -> Dict[str, Any]:
            """Process an interaction with a user"""
            response, relationship_delta = self.persona.process_interaction(user_id, message)
            rel = self.persona.relationships.get_or_create_relationship(user_id)

            return {
                "response": response,
                "relationship_delta": relationship_delta,
                "new_relationship_score": rel.score,
                "transmission_enabled": rel.transmission_enabled,
                "relationship_status": rel.status.value
            }

        @self.app.get("/check_transmission_eligibility", operation_id="check_transmission_eligibility")
        async def check_transmission_eligibility(user_id: str) -> Dict[str, Any]:
            """Check if AI can transmit to a specific user"""
            can_transmit = self.persona.can_transmit_to(user_id)
            rel = self.persona.relationships.get_or_create_relationship(user_id)

            return {
                "can_transmit": can_transmit,
                "relationship_score": rel.score,
                "threshold": rel.threshold,
                "is_broken": rel.is_broken,
                "transmission_enabled": rel.transmission_enabled
            }

        @self.app.get("/get_fortune", operation_id="get_fortune")
        async def get_fortune() -> Dict[str, Any]:
            """Get today's AI fortune"""
            fortune = self.persona.fortune_system.get_today_fortune()
            modifiers = self.persona.fortune_system.get_personality_modifier(fortune)

            return {
                "value": fortune.fortune_value,
                "date": fortune.date.isoformat(),
                "consecutive_good": fortune.consecutive_good,
                "consecutive_bad": fortune.consecutive_bad,
                "breakthrough": fortune.breakthrough_triggered,
                "personality_modifiers": modifiers
            }

        @self.app.post("/summarize_memories", operation_id="summarize_memories")
        async def summarize_memories(user_id: str) -> Optional[Dict[str, Any]]:
            """Create a summary of recent memories for a user"""
            summary = self.persona.memory.summarize_memories(user_id)
            if summary:
                return {
                    "id": summary.id,
                    "content": summary.content,
                    "level": summary.level.value,
                    "timestamp": summary.timestamp.isoformat()
                }
            return None

        @self.app.post("/run_maintenance", operation_id="run_maintenance")
        async def run_maintenance() -> Dict[str, str]:
            """Run daily maintenance tasks"""
            self.persona.daily_maintenance()
            return {"status": "Maintenance completed successfully"}

        # Shell integration tools (ai.shell)
        @self.app.post("/execute_command", operation_id="execute_command")
        async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
            """Execute a shell command"""
            try:
                result = subprocess.run(
                    shlex.split(command),
                    cwd=working_dir,
                    capture_output=True,
                    text=True,
                    timeout=60
                )

                return {
                    "status": "success" if result.returncode == 0 else "error",
                    "returncode": result.returncode,
                    "stdout": result.stdout,
                    "stderr": result.stderr,
                    "command": command
                }
            except subprocess.TimeoutExpired:
                return {"error": "Command timed out"}
            except Exception as e:
                return {"error": str(e)}

        @self.app.post("/analyze_file", operation_id="analyze_file")
        async def analyze_file(file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
            """Analyze a file using AI"""
            try:
                if not os.path.exists(file_path):
                    return {"error": f"File not found: {file_path}"}

                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()

                # Get AI provider from app state
                ai_provider = getattr(self.app.state, 'ai_provider', 'ollama')
                ai_model = getattr(self.app.state, 'ai_model', 'qwen2.5')

                provider = create_ai_provider(ai_provider, ai_model)

                # Analyze with AI
                prompt = f"{analysis_prompt}\n\nFile: {file_path}\n\nContent:\n{content}"
                analysis = provider.generate_response(prompt, "You are a code analyst.")

                return {
                    "analysis": analysis,
                    "file_path": file_path,
                    "file_size": len(content),
                    "line_count": len(content.split('\n'))
                }
            except Exception as e:
                return {"error": str(e)}

        @self.app.post("/write_file", operation_id="write_file")
        async def write_file(file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
            """Write content to a file"""
            try:
                file_path_obj = Path(file_path)

                # Create backup if requested
                backup_path = None
                if backup and file_path_obj.exists():
                    backup_path = f"{file_path}.backup"
                    with open(file_path, 'r', encoding='utf-8') as src:
                        with open(backup_path, 'w', encoding='utf-8') as dst:
                            dst.write(src.read())

                # Write file
                file_path_obj.parent.mkdir(parents=True, exist_ok=True)
                with open(file_path, 'w', encoding='utf-8') as f:
                    f.write(content)

                return {
                    "status": "success",
                    "file_path": file_path,
                    "backup_path": backup_path,
                    "bytes_written": len(content.encode('utf-8'))
                }
            except Exception as e:
                return {"error": str(e)}

        @self.app.get("/read_project_file", operation_id="read_project_file")
        async def read_project_file(file_name: str = "aishell.md") -> Dict[str, Any]:
            """Read project files like aishell.md (similar to claude.md)"""
            try:
                # Check common locations
                search_paths = [
                    Path.cwd() / file_name,
                    Path.cwd() / "docs" / file_name,
                    self.data_dir.parent / file_name,
                ]

                for path in search_paths:
                    if path.exists():
                        with open(path, 'r', encoding='utf-8') as f:
                            content = f.read()
                        return {
                            "content": content,
                            "path": str(path),
                            "exists": True
                        }

                return {
                    "exists": False,
                    "searched_paths": [str(p) for p in search_paths],
                    "error": f"{file_name} not found"
                }
            except Exception as e:
                return {"error": str(e)}

        @self.app.get("/list_files", operation_id="list_files")
        async def list_files(directory: str = ".", pattern: str = "*") -> Dict[str, Any]:
            """List files in a directory"""
            try:
                dir_path = Path(directory)
                if not dir_path.exists():
                    return {"error": f"Directory not found: {directory}"}

                files = []
                for item in dir_path.glob(pattern):
                    files.append({
                        "name": item.name,
                        "path": str(item),
                        "is_file": item.is_file(),
                        "is_dir": item.is_dir(),
                        "size": item.stat().st_size if item.is_file() else None
                    })

                return {
                    "directory": directory,
                    "pattern": pattern,
                    "files": files,
                    "count": len(files)
                }
            except Exception as e:
                return {"error": str(e)}

        # ai.bot integration tools
        @self.app.post("/remote_shell", operation_id="remote_shell")
        async def remote_shell(command: str, ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]:
            """Execute command via ai.bot /sh functionality (systemd-nspawn isolated execution)"""
            try:
                async with httpx.AsyncClient(timeout=30.0) as client:
                    # Send the command to ai.bot's /sh endpoint
                    response = await client.post(
                        f"{ai_bot_url}/sh",
                        json={"command": command},
                        headers={"Content-Type": "application/json"}
                    )

                    if response.status_code == 200:
                        result = response.json()
                        return {
                            "status": "success",
                            "command": command,
                            "output": result.get("output", ""),
                            "error": result.get("error", ""),
                            "exit_code": result.get("exit_code", 0),
                            "execution_time": result.get("execution_time", ""),
                            "container_id": result.get("container_id", ""),
                            "isolated": True  # systemd-nspawn isolation
                        }
                    else:
                        return {
                            "status": "error",
                            "error": f"ai.bot responded with status {response.status_code}",
                            "response_text": response.text
                        }
            except httpx.TimeoutException:
                return {"status": "error", "error": "Request to ai.bot timed out"}
            except Exception as e:
                return {"status": "error", "error": f"Failed to connect to ai.bot: {str(e)}"}

        @self.app.get("/ai_bot_status", operation_id="ai_bot_status")
        async def ai_bot_status(ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]:
            """Check ai.bot server status and available commands"""
            try:
                async with httpx.AsyncClient(timeout=10.0) as client:
                    response = await client.get(f"{ai_bot_url}/status")

                    if response.status_code == 200:
                        result = response.json()
                        return {
                            "status": "online",
                            "ai_bot_url": ai_bot_url,
                            "server_info": result,
                            "shell_available": True
                        }
                    else:
                        return {
                            "status": "error",
                            "error": f"ai.bot status check failed: {response.status_code}"
                        }
            except Exception as e:
                return {
                    "status": "offline",
                    "error": f"Cannot connect to ai.bot: {str(e)}",
                    "ai_bot_url": ai_bot_url
                }

        @self.app.post("/isolated_python", operation_id="isolated_python")
        async def isolated_python(code: str, ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]:
            """Execute Python code in isolated ai.bot environment"""
            # Execute the Python code via /sh. Escape double quotes first: an
            # f-string expression cannot reuse the enclosing quote character.
            escaped_code = code.replace('"', '\\"')
            python_command = f'python3 -c "{escaped_code}"'
            return await remote_shell(python_command, ai_bot_url)

        @self.app.post("/isolated_analysis", operation_id="isolated_analysis")
        async def isolated_analysis(file_path: str, analysis_type: str = "structure", ai_bot_url: str = "http://localhost:8080") -> Dict[str, Any]:
            """Perform code analysis in isolated environment"""
            if analysis_type == "structure":
                command = f"find {file_path} -type f -name '*.py' | head -20"
            elif analysis_type == "lines":
                command = f"wc -l {file_path}"
            elif analysis_type == "syntax":
                command = f"python3 -m py_compile {file_path}"
            else:
                command = f"file {file_path}"

            return await remote_shell(command, ai_bot_url)

        # Mount MCP server
        self.server.mount()

    def get_server(self) -> FastApiMCP:
        """Get the FastAPI MCP server instance"""
        return self.server

    async def close(self):
        """Cleanup resources"""
        pass
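# A quick client-side sketch of the endpoints above (illustrative; it assumes
# the FastAPI app is served on localhost:8001 — the port is not specified in
# this file). Simple-typed POST parameters become query parameters in FastAPI:
#
#   import httpx
#
#   r = httpx.post(
#       "http://localhost:8001/process_interaction",
#       params={"user_id": "did:plc:example", "message": "hello"},
#   )
#   print(r.json()["response"], r.json()["new_relationship_score"])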
@@ -1,146 +0,0 @@
"""Simple MCP Server implementation for ai.gpt"""

from mcp import Server
from mcp.types import Tool, TextContent
from pathlib import Path
from typing import Any, Dict, List, Optional
import json

from .persona import Persona
from .ai_provider import create_ai_provider
import subprocess
import os


def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server:
    """Create MCP server with ai.gpt tools"""
    server = Server("aigpt")
    persona = Persona(data_dir)

    @server.tool()
    async def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
        """Get active memories from the AI's memory system"""
        memories = persona.memory.get_active_memories(limit=limit)
        return [
            {
                "id": mem.id,
                "content": mem.content,
                "level": mem.level.value,
                "importance": mem.importance_score,
                "is_core": mem.is_core,
                "timestamp": mem.timestamp.isoformat()
            }
            for mem in memories
        ]

    @server.tool()
    async def get_relationship(user_id: str) -> Dict[str, Any]:
        """Get relationship status with a specific user"""
        rel = persona.relationships.get_or_create_relationship(user_id)
        return {
            "user_id": rel.user_id,
            "status": rel.status.value,
            "score": rel.score,
            "transmission_enabled": rel.transmission_enabled,
            "is_broken": rel.is_broken,
            "total_interactions": rel.total_interactions,
            "last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
        }

    @server.tool()
    async def process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]:
        """Process an interaction with a user"""
        ai_provider = create_ai_provider(provider, model)
        response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
        rel = persona.relationships.get_or_create_relationship(user_id)

        return {
            "response": response,
            "relationship_delta": relationship_delta,
            "new_relationship_score": rel.score,
            "transmission_enabled": rel.transmission_enabled,
            "relationship_status": rel.status.value
        }

    @server.tool()
    async def get_fortune() -> Dict[str, Any]:
        """Get today's AI fortune"""
        fortune = persona.fortune_system.get_today_fortune()
        modifiers = persona.fortune_system.get_personality_modifier(fortune)

        return {
            "value": fortune.fortune_value,
            "date": fortune.date.isoformat(),
            "consecutive_good": fortune.consecutive_good,
            "consecutive_bad": fortune.consecutive_bad,
            "breakthrough": fortune.breakthrough_triggered,
            "personality_modifiers": modifiers
        }

    @server.tool()
    async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
        """Execute a shell command"""
        try:
            import shlex
            result = subprocess.run(
                shlex.split(command),
                cwd=working_dir,
                capture_output=True,
                text=True,
                timeout=60
            )

            return {
                "status": "success" if result.returncode == 0 else "error",
                "returncode": result.returncode,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "command": command
            }
        except subprocess.TimeoutExpired:
            return {"error": "Command timed out"}
        except Exception as e:
            return {"error": str(e)}

    @server.tool()
    async def analyze_file(file_path: str) -> Dict[str, Any]:
        """Analyze a file using AI"""
        try:
            if not os.path.exists(file_path):
                return {"error": f"File not found: {file_path}"}

            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            ai_provider = create_ai_provider("ollama", "qwen2.5")

            prompt = f"Analyze this file and provide insights:\n\nFile: {file_path}\n\nContent:\n{content[:2000]}"
            analysis = ai_provider.generate_response(prompt, "You are a code analyst.")

            return {
                "analysis": analysis,
                "file_path": file_path,
                "file_size": len(content),
                "line_count": len(content.split('\n'))
            }
        except Exception as e:
            return {"error": str(e)}

    return server


async def main():
    """Run MCP server"""
    import sys
    from mcp import stdio_server

    data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
    data_dir.mkdir(parents=True, exist_ok=True)

    server = create_mcp_server(data_dir)
    await stdio_server(server)


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
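# Registering this stdio server with an MCP client is typically a JSON entry
# like the following (a sketch; the module path and the "mcpServers" config
# format are assumptions in the style of Claude Desktop's configuration):
#
#   {
#     "mcpServers": {
#       "aigpt": {
#         "command": "python",
#         "args": ["-m", "aigpt.mcp_server_simple"]
#       }
#     }
#   }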
@@ -1,408 +0,0 @@
"""Memory management system for ai.gpt"""

import json
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Any
import logging

from .models import Memory, MemoryLevel, Conversation


class MemoryManager:
    """Manages AI's memory with hierarchical storage and forgetting"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.memories_file = data_dir / "memories.json"
        self.conversations_file = data_dir / "conversations.json"
        self.memories: Dict[str, Memory] = {}
        self.conversations: List[Conversation] = []
        self.logger = logging.getLogger(__name__)
        self._load_memories()

    def _load_memories(self):
        """Load memories from persistent storage"""
        if self.memories_file.exists():
            with open(self.memories_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for mem_data in data:
                    memory = Memory(**mem_data)
                    self.memories[memory.id] = memory

        if self.conversations_file.exists():
            with open(self.conversations_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                self.conversations = [Conversation(**conv) for conv in data]

    def _save_memories(self):
        """Save memories to persistent storage"""
        memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()]
        with open(self.memories_file, 'w', encoding='utf-8') as f:
            json.dump(memories_data, f, indent=2, default=str)

        conv_data = [conv.model_dump(mode='json') for conv in self.conversations]
        with open(self.conversations_file, 'w', encoding='utf-8') as f:
            json.dump(conv_data, f, indent=2, default=str)

    def add_conversation(self, conversation: Conversation) -> Memory:
        """Add a conversation and create memory from it"""
        self.conversations.append(conversation)

        # Create memory from conversation
        memory_id = hashlib.sha256(
            f"{conversation.id}{conversation.timestamp}".encode()
        ).hexdigest()[:16]

        memory = Memory(
            id=memory_id,
            timestamp=conversation.timestamp,
            content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}",
            level=MemoryLevel.FULL_LOG,
            importance_score=abs(conversation.relationship_delta) * 0.1
        )

        self.memories[memory.id] = memory
        self._save_memories()
        return memory

    def add_memory(self, memory: Memory):
        """Add a memory directly to the system"""
        self.memories[memory.id] = memory
        self._save_memories()

    def create_smart_summary(self, user_id: str, ai_provider=None) -> Optional[Memory]:
        """Create AI-powered thematic summary from recent memories"""
        recent_memories = [
            mem for mem in self.memories.values()
            if mem.level == MemoryLevel.FULL_LOG
            and (datetime.now() - mem.timestamp).days < 7
        ]

        if len(recent_memories) < 5:
            return None

        # Sort by timestamp for chronological analysis
        recent_memories.sort(key=lambda m: m.timestamp)

        # Prepare conversation context for AI analysis
        conversations_text = "\n\n".join([
            f"[{mem.timestamp.strftime('%Y-%m-%d %H:%M')}] {mem.content}"
            for mem in recent_memories
        ])

        summary_prompt = f"""
Analyze these recent conversations and create a thematic summary focusing on:
1. Communication patterns and user preferences
2. Technical topics and problem-solving approaches
3. Relationship progression and trust level
4. Key recurring themes and interests

Conversations:
{conversations_text}

Create a concise summary (2-3 sentences) that captures the essence of this interaction period:
"""

        try:
            if ai_provider:
                summary_content = ai_provider.chat(summary_prompt, max_tokens=200)
            else:
                # Fallback to pattern-based analysis
                themes = self._extract_themes(recent_memories)
                summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions with focus on technical discussions."
        except Exception as e:
            self.logger.warning(f"AI summary failed, using fallback: {e}")
            themes = self._extract_themes(recent_memories)
            summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions."

        summary_id = hashlib.sha256(
            f"summary_{datetime.now().isoformat()}".encode()
        ).hexdigest()[:16]

        summary = Memory(
            id=summary_id,
            timestamp=datetime.now(),
            content=f"SUMMARY ({len(recent_memories)} conversations): {summary_content}",
            summary=summary_content,
            level=MemoryLevel.SUMMARY,
            importance_score=0.6,
            metadata={
                "memory_count": len(recent_memories),
                "time_span": f"{recent_memories[0].timestamp.date()} to {recent_memories[-1].timestamp.date()}",
                "themes": self._extract_themes(recent_memories)[:5]
            }
        )

        self.memories[summary.id] = summary

        # Reduce importance of summarized memories
        for mem in recent_memories:
            mem.importance_score *= 0.8

        self._save_memories()
        return summary

    def _extract_themes(self, memories: List[Memory]) -> List[str]:
        """Extract common themes from memory content"""
        common_words = {}
        for memory in memories:
            # Simple keyword extraction
            words = memory.content.lower().split()
            for word in words:
                if len(word) > 4 and word.isalpha():
                    common_words[word] = common_words.get(word, 0) + 1

        # Return most frequent meaningful words
        return sorted(common_words.keys(), key=common_words.get, reverse=True)[:10]

    def create_core_memory(self, ai_provider=None) -> Optional[Memory]:
        """Analyze all memories to extract core personality-forming elements"""
        # Collect all non-forgotten memories for analysis
        all_memories = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        if len(all_memories) < 10:
            return None

        # Sort by importance and timestamp for comprehensive analysis
        all_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)

        # Prepare memory context for AI analysis
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.timestamp.strftime('%Y-%m-%d')}: {mem.content[:200]}..."
            for mem in all_memories[:20]  # Top 20 memories
        ])

        core_prompt = f"""
Analyze these conversations and memories to identify core personality elements that define this user relationship:

1. Communication style and preferences
2. Core values and principles
3. Problem-solving patterns
4. Trust level and relationship depth
5. Unique characteristics that make this relationship special

Memories:
{memory_context}

Extract the essential personality-forming elements (2-3 sentences) that should NEVER be forgotten:
"""

        try:
            if ai_provider:
                core_content = ai_provider.chat(core_prompt, max_tokens=150)
            else:
                # Fallback to pattern analysis
                user_patterns = self._analyze_user_patterns(all_memories)
                core_content = f"User shows {user_patterns['communication_style']} communication, focuses on {user_patterns['main_interests']}, and demonstrates {user_patterns['problem_solving']} approach."
        except Exception as e:
            self.logger.warning(f"AI core analysis failed, using fallback: {e}")
            user_patterns = self._analyze_user_patterns(all_memories)
            core_content = f"Core pattern: {user_patterns['communication_style']} style, {user_patterns['main_interests']} interests."

        # Create core memory
        core_id = hashlib.sha256(
            f"core_{datetime.now().isoformat()}".encode()
        ).hexdigest()[:16]

        core_memory = Memory(
            id=core_id,
            timestamp=datetime.now(),
            content=f"CORE PERSONALITY: {core_content}",
            summary=core_content,
            level=MemoryLevel.CORE,
            importance_score=1.0,
            is_core=True,
            metadata={
                "source_memories": len(all_memories),
                "analysis_date": datetime.now().isoformat(),
                "patterns": self._analyze_user_patterns(all_memories)
            }
        )

        self.memories[core_memory.id] = core_memory
        self._save_memories()

        self.logger.info(f"Core memory created: {core_id}")
        return core_memory

    def _analyze_user_patterns(self, memories: List[Memory]) -> Dict[str, str]:
        """Analyze patterns in user behavior from memories"""
        # Extract patterns from conversation content
        all_content = " ".join([mem.content.lower() for mem in memories])

        # Simple pattern detection
        communication_indicators = {
            "technical": ["code", "implementation", "system", "api", "database"],
            "casual": ["thanks", "please", "sorry", "help"],
            "formal": ["could", "would", "should", "proper"]
        }

        problem_solving_indicators = {
            "systematic": ["first", "then", "next", "step", "plan"],
            "experimental": ["try", "test", "experiment", "see"],
            "theoretical": ["concept", "design", "architecture", "pattern"]
        }

        # Score each pattern
        communication_style = max(
            communication_indicators.keys(),
            key=lambda style: sum(all_content.count(word) for word in communication_indicators[style])
        )

        problem_solving = max(
            problem_solving_indicators.keys(),
            key=lambda style: sum(all_content.count(word) for word in problem_solving_indicators[style])
        )

        # Extract main interests from themes
        themes = self._extract_themes(memories)
        main_interests = ", ".join(themes[:3]) if themes else "general technology"

        return {
            "communication_style": communication_style,
            "problem_solving": problem_solving,
            "main_interests": main_interests,
            "interaction_count": len(memories)
        }

    def identify_core_memories(self) -> List[Memory]:
        """Identify existing memories that should become core (legacy method)"""
        core_candidates = [
            mem for mem in self.memories.values()
            if mem.importance_score > 0.8
            and not mem.is_core
            and mem.level != MemoryLevel.FORGOTTEN
        ]

        for memory in core_candidates:
            memory.is_core = True
            memory.level = MemoryLevel.CORE
            self.logger.info(f"Memory {memory.id} promoted to core")

        self._save_memories()
        return core_candidates

    def apply_forgetting(self):
        """Apply selective forgetting based on importance and time"""
        now = datetime.now()

        for memory in self.memories.values():
            if memory.is_core or memory.level == MemoryLevel.FORGOTTEN:
                continue

            # Time-based decay
            age_days = (now - memory.timestamp).days
            decay_factor = memory.decay_rate * age_days
            memory.importance_score -= decay_factor

            # Forget unimportant old memories
            if memory.importance_score <= 0.1 and age_days > 30:
                memory.level = MemoryLevel.FORGOTTEN
                self.logger.info(f"Memory {memory.id} forgotten")

        self._save_memories()

    def get_active_memories(self, limit: int = 10) -> List[Memory]:
        """Get currently active memories for persona (legacy method)"""
        active = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        # Sort by importance and recency
        active.sort(
            key=lambda m: (m.is_core, m.importance_score, m.timestamp),
            reverse=True
        )

        return active[:limit]

    def get_contextual_memories(self, query: str = "", limit: int = 10) -> Dict[str, List[Memory]]:
        """Get memories organized by priority with contextual relevance"""
        all_memories = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        # Categorize memories by type and importance
        core_memories = [mem for mem in all_memories if mem.level == MemoryLevel.CORE]
        summary_memories = [mem for mem in all_memories if mem.level == MemoryLevel.SUMMARY]
        recent_memories = [
            mem for mem in all_memories
            if mem.level == MemoryLevel.FULL_LOG
            and (datetime.now() - mem.timestamp).days < 3
        ]

        # Apply keyword relevance if query provided
        if query:
            query_lower = query.lower()

            def relevance_score(memory: Memory) -> float:
                content_score = 1 if query_lower in memory.content.lower() else 0
                summary_score = 1 if memory.summary and query_lower in memory.summary.lower() else 0
                metadata_score = 1 if any(
                    query_lower in str(v).lower()
                    for v in (memory.metadata or {}).values()
                ) else 0
                return content_score + summary_score + metadata_score

            # Re-rank by relevance while maintaining type priority
            core_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
            summary_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
            recent_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
        else:
            # Sort by importance and recency
            core_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
            summary_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
            recent_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)

        # Return organized memory structure
        return {
            "core": core_memories[:3],  # Always include top core memories
            "summary": summary_memories[:3],  # Recent summaries
            "recent": recent_memories[:limit - 6],  # Fill remaining with recent
            "all_active": all_memories[:limit]  # Fallback for simple access
        }

    def search_memories(self, keywords: List[str], memory_types: Optional[List[MemoryLevel]] = None) -> List[Memory]:
        """Search memories by keywords and optionally filter by memory types"""
        if memory_types is None:
            memory_types = [MemoryLevel.CORE, MemoryLevel.SUMMARY, MemoryLevel.FULL_LOG]

        matching_memories = []

        for memory in self.memories.values():
            if memory.level not in memory_types or memory.level == MemoryLevel.FORGOTTEN:
                continue

            # Check if any keyword matches in content, summary, or metadata
            content_text = f"{memory.content} {memory.summary or ''}"
            if memory.metadata:
                content_text += " " + " ".join(str(v) for v in memory.metadata.values())

            content_lower = content_text.lower()

            # Score by keyword matches
            match_score = sum(
                keyword.lower() in content_lower
                for keyword in keywords
            )

            if match_score > 0:
                # Add match score to memory for sorting
                memory_copy = memory.model_copy()
                memory_copy.importance_score += match_score * 0.1
                matching_memories.append(memory_copy)

        # Sort by relevance (match score + importance + core status)
        matching_memories.sort(
            key=lambda m: (m.is_core, m.importance_score, m.timestamp),
            reverse=True
        )

        return matching_memories
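# The intended lifecycle of the hierarchy above, end to end (a sketch; the
# Conversation values are illustrative):
#
#   mm = MemoryManager(Path("~/.config/syui/ai/gpt/data").expanduser())
#   mm.add_conversation(Conversation(
#       id="c1", user_id="did:plc:example", timestamp=datetime.now(),
#       user_message="hello", ai_response="hi", relationship_delta=1.0,
#   ))                                           # FULL_LOG entry, importance 0.1
#   mm.create_smart_summary("did:plc:example")   # >= 5 recent logs -> SUMMARY
#   mm.create_core_memory()                      # >= 10 memories  -> CORE
#   mm.apply_forgetting()                        # old, low-importance -> FORGOTTEN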
@@ -1,88 +0,0 @@
"""Data models for ai.gpt system"""

from datetime import datetime, date
from typing import Optional, Dict, List, Any
from enum import Enum
from pydantic import BaseModel, Field, field_validator


class MemoryLevel(str, Enum):
    """Memory importance levels"""
    FULL_LOG = "full_log"
    SUMMARY = "summary"
    CORE = "core"
    FORGOTTEN = "forgotten"


class RelationshipStatus(str, Enum):
    """Relationship status levels"""
    STRANGER = "stranger"
    ACQUAINTANCE = "acquaintance"
    FRIEND = "friend"
    CLOSE_FRIEND = "close_friend"
    BROKEN = "broken"  # irreversible


class Memory(BaseModel):
    """Single memory unit"""
    id: str
    timestamp: datetime
    content: str
    summary: Optional[str] = None
    level: MemoryLevel = MemoryLevel.FULL_LOG
    importance_score: float
    is_core: bool = False
    decay_rate: float = 0.01
    metadata: Optional[Dict[str, Any]] = None

    @field_validator('importance_score')
    @classmethod
    def validate_importance_score(cls, v):
        """Ensure importance_score is within valid range, handling floating point precision issues"""
        if abs(v) < 1e-10:  # Very close to zero
            return 0.0
        return max(0.0, min(1.0, v))


class Relationship(BaseModel):
    """Relationship with a specific user"""
    user_id: str  # atproto DID
    status: RelationshipStatus = RelationshipStatus.STRANGER
    score: float = 0.0
    daily_interactions: int = 0
    total_interactions: int = 0
    last_interaction: Optional[datetime] = None
    transmission_enabled: bool = False
    threshold: float = 100.0
    decay_rate: float = 0.1
    daily_limit: int = 10
    is_broken: bool = False


class AIFortune(BaseModel):
    """Daily AI fortune affecting personality"""
    date: date
    fortune_value: int = Field(ge=1, le=10)
    consecutive_good: int = 0
    consecutive_bad: int = 0
    breakthrough_triggered: bool = False


class PersonaState(BaseModel):
    """Current persona state"""
    base_personality: Dict[str, float]
    current_mood: str
    fortune: AIFortune
    active_memories: List[str]  # Memory IDs
    relationship_modifiers: Dict[str, float]


class Conversation(BaseModel):
    """Conversation log entry"""
    id: str
    user_id: str
    timestamp: datetime
    user_message: str
    ai_response: str
    relationship_delta: float = 0.0
    memory_created: bool = False
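# How the importance_score validator above behaves (illustrative values):
#
#   Memory(id="m", timestamp=datetime.now(), content="x",
#          importance_score=1.7)     # clamped to 1.0
#   Memory(id="m", timestamp=datetime.now(), content="x",
#          importance_score=-3e-12)  # snapped to 0.0 (float-precision guard)
#
# Note the clamp runs at construction time; in-place arithmetic on an existing
# instance (as in MemoryManager.apply_forgetting) is not re-validated unless
# validate_assignment is enabled on the model.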
@@ -1,250 +0,0 @@
|
||||
"""Persona management system integrating memory, relationships, and fortune"""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
import logging
|
||||
|
||||
from .models import PersonaState, Conversation
|
||||
from .memory import MemoryManager
|
||||
from .relationship import RelationshipTracker
|
||||
from .fortune import FortuneSystem
|
||||
|
||||
|
||||
class Persona:
|
||||
"""AI persona with unique characteristics based on interactions"""
|
||||
|
||||
def __init__(self, data_dir: Path, name: str = "ai"):
|
||||
self.data_dir = data_dir
|
||||
self.name = name
|
||||
self.memory = MemoryManager(data_dir)
|
||||
self.relationships = RelationshipTracker(data_dir)
|
||||
self.fortune_system = FortuneSystem(data_dir)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
# Base personality traits
|
||||
self.base_personality = {
|
||||
"curiosity": 0.7,
|
||||
"empathy": 0.8,
|
||||
"creativity": 0.6,
|
||||
"patience": 0.7,
|
||||
"optimism": 0.6
|
||||
}
|
||||
|
||||
self.state_file = data_dir / "persona_state.json"
|
||||
self._load_state()
|
||||
|
||||
def _load_state(self):
|
||||
"""Load persona state from storage"""
|
||||
if self.state_file.exists():
|
||||
with open(self.state_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
self.base_personality = data.get("base_personality", self.base_personality)
|
||||
|
||||
def _save_state(self):
|
||||
"""Save persona state to storage"""
|
||||
state_data = {
|
||||
"base_personality": self.base_personality,
|
||||
"last_updated": datetime.now().isoformat()
|
||||
}
|
||||
with open(self.state_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(state_data, f, indent=2)
|
||||
|
||||
def get_current_state(self) -> PersonaState:
|
||||
"""Get current persona state including all modifiers"""
|
||||
# Get today's fortune
|
||||
fortune = self.fortune_system.get_today_fortune()
|
||||
fortune_modifiers = self.fortune_system.get_personality_modifier(fortune)
|
||||
|
||||
# Apply fortune modifiers to base personality
|
||||
current_personality = {}
|
||||
for trait, base_value in self.base_personality.items():
|
||||
modifier = fortune_modifiers.get(trait, 1.0)
|
||||
current_personality[trait] = min(1.0, base_value * modifier)
|
||||
|
||||
# Get active memories for context
|
||||
active_memories = self.memory.get_active_memories(limit=5)
|
||||
|
||||
# Determine mood based on fortune and recent interactions
|
||||
mood = self._determine_mood(fortune.fortune_value)
|
||||
|
||||
state = PersonaState(
|
||||
base_personality=current_personality,
|
||||
current_mood=mood,
|
||||
            fortune=fortune,
            active_memories=[mem.id for mem in active_memories],
            relationship_modifiers={}
        )

        return state

    def _determine_mood(self, fortune_value: int) -> str:
        """Determine current mood based on fortune and other factors"""
        if fortune_value >= 8:
            return "joyful"
        elif fortune_value >= 6:
            return "cheerful"
        elif fortune_value >= 4:
            return "neutral"
        elif fortune_value >= 2:
            return "melancholic"
        else:
            return "contemplative"

    def build_context_prompt(self, user_id: str, current_message: str) -> str:
        """Build context-aware prompt with relevant memories"""
        # Get contextual memories based on current message
        memory_groups = self.memory.get_contextual_memories(query=current_message, limit=8)

        # Build context sections
        context_parts = []

        # Core personality elements (always included)
        if memory_groups["core"]:
            core_context = "\n".join([mem.content for mem in memory_groups["core"]])
            context_parts.append(f"CORE PERSONALITY:\n{core_context}")

        # Recent summaries for context
        if memory_groups["summary"]:
            summary_context = "\n".join([mem.summary or mem.content for mem in memory_groups["summary"]])
            context_parts.append(f"RECENT PATTERNS:\n{summary_context}")

        # Recent specific interactions
        if memory_groups["recent"]:
            recent_context = "\n".join([
                f"[{mem.timestamp.strftime('%m-%d')}] {mem.content[:100]}..."
                for mem in memory_groups["recent"][:3]
            ])
            context_parts.append(f"RECENT INTERACTIONS:\n{recent_context}")

        # Get current persona state
        state = self.get_current_state()

        # Build final prompt
        context_prompt = f"""You are an AI with persistent memory and evolving relationships. Your current state:

PERSONALITY: {', '.join([f'{k}={v:.1f}' for k, v in state.base_personality.items()])}
MOOD: {state.current_mood}
FORTUNE: {state.fortune.fortune_value}/10

"""

        if context_parts:
            context_prompt += "RELEVANT CONTEXT:\n" + "\n\n".join(context_parts) + "\n\n"

        context_prompt += f"""Respond to this message while staying true to your personality and the established relationship context:

User: {current_message}

AI:"""

        return context_prompt

    def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]:
        """Process user interaction and generate response with enhanced context"""
        # Get current state
        state = self.get_current_state()

        # Get relationship with user
        relationship = self.relationships.get_or_create_relationship(user_id)

        # Enhanced response generation with context awareness
        if relationship.is_broken:
            response = "..."
            relationship_delta = 0.0
        else:
            if ai_provider:
                # Build context-aware prompt
                context_prompt = self.build_context_prompt(user_id, message)

                # Generate response using AI with full context
                try:
                    response = ai_provider.chat(context_prompt, max_tokens=200)

                    # Clean up response if it includes the prompt echo
                    if "AI:" in response:
                        response = response.split("AI:")[-1].strip()

                except Exception as e:
                    self.logger.error(f"AI response generation failed: {e}")
                    response = f"I appreciate your message about {message[:50]}..."

                # Calculate relationship delta based on interaction quality and context
                if state.current_mood in ["joyful", "cheerful"]:
                    relationship_delta = 2.0
                elif relationship.status.value == "close_friend":
                    relationship_delta = 1.5
                else:
                    relationship_delta = 1.0
            else:
                # Context-aware fallback responses
                memory_groups = self.memory.get_contextual_memories(query=message, limit=3)

                if memory_groups["core"]:
                    # Reference core memories for continuity
                    response = f"Based on our relationship, I think {message.lower()} connects to what we've discussed before."
                    relationship_delta = 1.5
                elif state.current_mood == "joyful":
                    response = f"What a wonderful day! {message} sounds interesting!"
                    relationship_delta = 2.0
                elif relationship.status.value == "close_friend":
                    response = f"I've been thinking about our conversations. {message}"
                    relationship_delta = 1.5
                else:
                    response = f"I understand. {message}"
                    relationship_delta = 1.0

        # Create conversation record
        conv_id = f"{user_id}_{datetime.now().timestamp()}"
        conversation = Conversation(
            id=conv_id,
            user_id=user_id,
            timestamp=datetime.now(),
            user_message=message,
            ai_response=response,
            relationship_delta=relationship_delta,
            memory_created=True
        )

        # Update memory
        self.memory.add_conversation(conversation)

        # Update relationship
        self.relationships.update_interaction(user_id, relationship_delta)

        return response, relationship_delta

    def can_transmit_to(self, user_id: str) -> bool:
        """Check if AI can transmit messages to this user"""
        relationship = self.relationships.get_or_create_relationship(user_id)
        return relationship.transmission_enabled and not relationship.is_broken

    def daily_maintenance(self):
        """Perform daily maintenance tasks"""
        self.logger.info("Performing daily maintenance...")

        # Apply time decay to relationships
        self.relationships.apply_time_decay()

        # Apply forgetting to memories
        self.memory.apply_forgetting()

        # Identify core memories
        core_memories = self.memory.identify_core_memories()
        if core_memories:
            self.logger.info(f"Identified {len(core_memories)} new core memories")

        # Create memory summaries
        for user_id in self.relationships.relationships:
            try:
                from .ai_provider import create_ai_provider
                ai_provider = create_ai_provider()
                summary = self.memory.create_smart_summary(user_id, ai_provider=ai_provider)
                if summary:
                    self.logger.info(f"Created smart summary for interactions with {user_id}")
            except Exception as e:
                self.logger.warning(f"Could not create AI summary for {user_id}: {e}")

        self._save_state()
        self.logger.info("Daily maintenance completed")
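For reference, the deleted _determine_mood above is a pure mapping from the daily 1-10 fortune value to a mood label. A minimal Rust sketch of the same mapping, assuming the port keeps identical thresholds (the function name and signature here are assumptions, not taken from this diff):

// Hypothetical sketch of _determine_mood for the Rust port; not part of this diff.
fn determine_mood(fortune_value: u8) -> &'static str {
    match fortune_value {
        8..=10 => "joyful",
        6..=7 => "cheerful",
        4..=5 => "neutral",
        2..=3 => "melancholic",
        _ => "contemplative",
    }
}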
@@ -1,321 +0,0 @@
"""Project management and continuous development logic for ai.shell"""

import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Any
from datetime import datetime
import subprocess
import hashlib

from .models import Memory
from .ai_provider import AIProvider


class ProjectState:
    """Tracks the current state of the project"""

    def __init__(self, project_root: Path):
        self.project_root = project_root
        self.files_state: Dict[str, str] = {}  # file path -> hash
        self.last_analysis: Optional[datetime] = None
        self.project_context: Optional[str] = None
        self.development_goals: List[str] = []
        self.known_patterns: Dict[str, Any] = {}

    def scan_project_files(self) -> Dict[str, str]:
        """Scan project files and compute their hashes"""
        current_state = {}

        # Target file extensions
        target_extensions = {'.py', '.js', '.ts', '.rs', '.go', '.java', '.cpp', '.c', '.h'}

        for file_path in self.project_root.rglob('*'):
            if (file_path.is_file() and
                    file_path.suffix in target_extensions and
                    not any(part.startswith('.') for part in file_path.parts)):

                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()

                    file_hash = hashlib.md5(content.encode()).hexdigest()
                    relative_path = str(file_path.relative_to(self.project_root))
                    current_state[relative_path] = file_hash
                except Exception:
                    continue

        return current_state

    def detect_changes(self) -> Dict[str, str]:
        """Detect file changes"""
        current_state = self.scan_project_files()
        changes = {}

        # New and modified files
        for path, current_hash in current_state.items():
            if path not in self.files_state or self.files_state[path] != current_hash:
                changes[path] = "modified" if path in self.files_state else "added"

        # Deleted files
        for path in self.files_state:
            if path not in current_state:
                changes[path] = "deleted"

        self.files_state = current_state
        return changes


class ContinuousDeveloper:
    """Claude Code-style continuous development features"""

    def __init__(self, project_root: Path, ai_provider: Optional[AIProvider] = None):
        self.project_root = project_root
        self.ai_provider = ai_provider
        self.project_state = ProjectState(project_root)
        self.session_memory: List[str] = []

    def load_project_context(self) -> str:
        """Load project context"""
        context_files = [
            "claude.md", "aishell.md", "README.md",
            "pyproject.toml", "package.json", "Cargo.toml"
        ]

        context_parts = []
        for filename in context_files:
            file_path = self.project_root / filename
            if file_path.exists():
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()
                    context_parts.append(f"## {filename}\n{content}")
                except Exception:
                    continue

        return "\n\n".join(context_parts)

    def analyze_project_structure(self) -> Dict[str, Any]:
        """Analyze the project structure"""
        analysis = {
            "language": self._detect_primary_language(),
            "framework": self._detect_framework(),
            "structure": self._analyze_file_structure(),
            "dependencies": self._analyze_dependencies(),
            "patterns": self._detect_code_patterns()
        }
        return analysis

    def _detect_primary_language(self) -> str:
        """Detect the primary language"""
        file_counts = {}
        for file_path in self.project_root.rglob('*'):
            if file_path.is_file() and file_path.suffix:
                ext = file_path.suffix.lower()
                file_counts[ext] = file_counts.get(ext, 0) + 1

        language_map = {
            '.py': 'Python',
            '.js': 'JavaScript',
            '.ts': 'TypeScript',
            '.rs': 'Rust',
            '.go': 'Go',
            '.java': 'Java'
        }

        if file_counts:
            primary_ext = max(file_counts.items(), key=lambda x: x[1])[0]
            return language_map.get(primary_ext, 'Unknown')
        return 'Unknown'

    def _detect_framework(self) -> str:
        """Detect the framework"""
        frameworks = {
            'fastapi': ['fastapi', 'uvicorn'],
            'django': ['django'],
            'flask': ['flask'],
            'react': ['react'],
            'next.js': ['next'],
            'rust-actix': ['actix-web'],
        }

        # Check dependencies in pyproject.toml, package.json, Cargo.toml
        for config_file in ['pyproject.toml', 'package.json', 'Cargo.toml']:
            config_path = self.project_root / config_file
            if config_path.exists():
                try:
                    with open(config_path, 'r') as f:
                        content = f.read().lower()

                    for framework, keywords in frameworks.items():
                        if any(keyword in content for keyword in keywords):
                            return framework
                except Exception:
                    continue

        return 'Unknown'

    def _analyze_file_structure(self) -> Dict[str, List[str]]:
        """Analyze the file structure"""
        structure = {"directories": [], "key_files": []}

        for item in self.project_root.iterdir():
            if item.is_dir() and not item.name.startswith('.'):
                structure["directories"].append(item.name)
            elif item.is_file() and item.name in [
                'main.py', 'app.py', 'index.js', 'main.rs', 'main.go'
            ]:
                structure["key_files"].append(item.name)

        return structure

    def _analyze_dependencies(self) -> List[str]:
        """Analyze dependencies"""
        deps = []

        # Python dependencies
        pyproject = self.project_root / "pyproject.toml"
        if pyproject.exists():
            try:
                with open(pyproject, 'r') as f:
                    content = f.read()
                # Simple regex would be better but for now just check for common packages
                common_packages = ['fastapi', 'pydantic', 'uvicorn', 'ollama', 'openai']
                for package in common_packages:
                    if package in content:
                        deps.append(package)
            except Exception:
                pass

        return deps

    def _detect_code_patterns(self) -> Dict[str, int]:
        """Detect code patterns"""
        patterns = {
            "classes": 0,
            "functions": 0,
            "api_endpoints": 0,
            "async_functions": 0
        }

        for py_file in self.project_root.rglob('*.py'):
            try:
                with open(py_file, 'r', encoding='utf-8') as f:
                    content = f.read()

                patterns["classes"] += content.count('class ')
                patterns["functions"] += content.count('def ')
                patterns["api_endpoints"] += content.count('@app.')
                patterns["async_functions"] += content.count('async def')
            except Exception:
                continue

        return patterns

    def suggest_next_steps(self, current_task: Optional[str] = None) -> List[str]:
        """Suggest next development steps"""
        if not self.ai_provider:
            return ["AI provider not available for suggestions"]

        context = self.load_project_context()
        analysis = self.analyze_project_structure()
        changes = self.project_state.detect_changes()

        prompt = f"""
Based on the project analysis, suggest 3-5 next development steps.

## Project context
{context[:1000]}

## Structure analysis
Language: {analysis['language']}
Framework: {analysis['framework']}
Patterns: {analysis['patterns']}

## Recent changes
{changes}

## Current task
{current_task or "None in particular"}

Suggest concrete, actionable steps:
"""

        try:
            response = self.ai_provider.chat(prompt, max_tokens=300)
            # Simple parsing - in real implementation would be more sophisticated
            steps = [line.strip() for line in response.split('\n')
                     if line.strip() and (line.strip().startswith('-') or line.strip().startswith('1.'))]
            return steps[:5]
        except Exception as e:
            return [f"Error generating suggestions: {str(e)}"]

    def generate_code(self, description: str, file_path: Optional[str] = None) -> str:
        """Generate code"""
        if not self.ai_provider:
            return "AI provider not available for code generation"

        context = self.load_project_context()
        analysis = self.analyze_project_structure()

        prompt = f"""
Generate code based on the following specification.

## Project context
{context[:800]}

## Language / framework
Language: {analysis['language']}
Framework: {analysis['framework']}
Existing patterns: {analysis['patterns']}

## Generation request
{description}

{"File path: " + file_path if file_path else ""}

Generate code that stays consistent with the project's existing code style:
"""

        try:
            return self.ai_provider.chat(prompt, max_tokens=500)
        except Exception as e:
            return f"Error generating code: {str(e)}"

    def analyze_file(self, file_path: str) -> str:
        """Analyze a file"""
        full_path = self.project_root / file_path
        if not full_path.exists():
            return f"File not found: {file_path}"

        try:
            with open(full_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            return f"Error reading file: {str(e)}"

        if not self.ai_provider:
            return f"File contents ({len(content)} chars):\n{content[:200]}..."

        context = self.load_project_context()

        prompt = f"""
Analyze the following file and point out improvements and problems.

## Project context
{context[:500]}

## File: {file_path}
{content[:1500]}

Analysis items:
1. Code quality
2. Consistency with the project
3. Improvement suggestions
4. Potential issues
"""

        try:
            return self.ai_provider.chat(prompt, max_tokens=400)
        except Exception as e:
            return f"Error analyzing file: {str(e)}"
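ProjectState.detect_changes above is a plain content-hash diff: hash every tracked file, then compare against the previous snapshot. A hedged Rust sketch of the same idea using only the standard library (the names and the 64-bit hash choice are assumptions; the deleted Python used MD5):

// Hypothetical sketch; not part of this diff.
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn content_hash(content: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    content.hash(&mut hasher);
    hasher.finish()
}

fn detect_changes(
    previous: &HashMap<String, u64>,
    current: &HashMap<String, u64>,
) -> HashMap<String, &'static str> {
    let mut changes = HashMap::new();
    // New and modified files
    for (path, hash) in current {
        match previous.get(path) {
            None => { changes.insert(path.clone(), "added"); }
            Some(old) if old != hash => { changes.insert(path.clone(), "modified"); }
            _ => {}
        }
    }
    // Deleted files
    for path in previous.keys() {
        if !current.contains_key(path) {
            changes.insert(path.clone(), "deleted");
        }
    }
    changes
}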
@@ -1,135 +0,0 @@
"""Relationship tracking system with irreversible damage"""

import json
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Optional
import logging

from .models import Relationship, RelationshipStatus


class RelationshipTracker:
    """Tracks and manages relationships with users"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.relationships_file = data_dir / "relationships.json"
        self.relationships: Dict[str, Relationship] = {}
        self.logger = logging.getLogger(__name__)
        self._load_relationships()

    def _load_relationships(self):
        """Load relationships from persistent storage"""
        if self.relationships_file.exists():
            with open(self.relationships_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for user_id, rel_data in data.items():
                    self.relationships[user_id] = Relationship(**rel_data)

    def _save_relationships(self):
        """Save relationships to persistent storage"""
        data = {
            user_id: rel.model_dump(mode='json')
            for user_id, rel in self.relationships.items()
        }
        with open(self.relationships_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, default=str)

    def get_or_create_relationship(self, user_id: str) -> Relationship:
        """Get existing relationship or create new one"""
        if user_id not in self.relationships:
            self.relationships[user_id] = Relationship(user_id=user_id)
            self._save_relationships()
        return self.relationships[user_id]

    def update_interaction(self, user_id: str, delta: float) -> Relationship:
        """Update relationship based on interaction"""
        rel = self.get_or_create_relationship(user_id)

        # Check if relationship is broken (irreversible)
        if rel.is_broken:
            self.logger.warning(f"Relationship with {user_id} is broken. No updates allowed.")
            return rel

        # Check daily limit
        if rel.last_interaction and rel.last_interaction.date() == datetime.now().date():
            if rel.daily_interactions >= rel.daily_limit:
                self.logger.info(f"Daily interaction limit reached for {user_id}")
                return rel
        else:
            rel.daily_interactions = 0

        # Update interaction counts
        rel.daily_interactions += 1
        rel.total_interactions += 1
        rel.last_interaction = datetime.now()

        # Update score with bounds
        old_score = rel.score
        rel.score += delta
        rel.score = max(0.0, min(200.0, rel.score))  # 0-200 range

        # Check for relationship damage
        if delta < -10.0:  # Significant negative interaction
            self.logger.warning(f"Major relationship damage with {user_id}: {delta}")
            if rel.score <= 0:
                rel.is_broken = True
                rel.status = RelationshipStatus.BROKEN
                rel.transmission_enabled = False
                self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)")

        # Update relationship status based on score
        if not rel.is_broken:
            if rel.score >= 150:
                rel.status = RelationshipStatus.CLOSE_FRIEND
            elif rel.score >= 100:
                rel.status = RelationshipStatus.FRIEND
            elif rel.score >= 50:
                rel.status = RelationshipStatus.ACQUAINTANCE
            else:
                rel.status = RelationshipStatus.STRANGER

        # Check transmission threshold
        if rel.score >= rel.threshold and not rel.transmission_enabled:
            rel.transmission_enabled = True
            self.logger.info(f"Transmission enabled for {user_id}!")

        self._save_relationships()
        return rel

    def apply_time_decay(self):
        """Apply time-based decay to all relationships"""
        now = datetime.now()

        for user_id, rel in self.relationships.items():
            if rel.is_broken or not rel.last_interaction:
                continue

            # Calculate days since last interaction
            days_inactive = (now - rel.last_interaction).days

            if days_inactive > 0:
                # Apply decay
                decay_amount = rel.decay_rate * days_inactive
                old_score = rel.score
                rel.score = max(0.0, rel.score - decay_amount)

                # Update status if score dropped
                if rel.score < rel.threshold:
                    rel.transmission_enabled = False

                if decay_amount > 0:
                    self.logger.info(
                        f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}"
                    )

        self._save_relationships()

    def get_transmission_eligible(self) -> Dict[str, Relationship]:
        """Get all relationships eligible for transmission"""
        return {
            user_id: rel
            for user_id, rel in self.relationships.items()
            if rel.transmission_enabled and not rel.is_broken
        }
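The score thresholds above (150 / 100 / 50 on a score clamped to 0-200) are the core of the status model. A minimal sketch of how a Rust port might express the same mapping (the type and function names are assumptions, not taken from this diff):

// Hypothetical sketch; enum and function names are assumptions.
#[derive(Debug, PartialEq)]
enum RelationshipStatus {
    Broken,
    Stranger,
    Acquaintance,
    Friend,
    CloseFriend,
}

fn status_for_score(score: f64, is_broken: bool) -> RelationshipStatus {
    if is_broken {
        RelationshipStatus::Broken
    } else if score >= 150.0 {
        RelationshipStatus::CloseFriend
    } else if score >= 100.0 {
        RelationshipStatus::Friend
    } else if score >= 50.0 {
        RelationshipStatus::Acquaintance
    } else {
        RelationshipStatus::Stranger
    }
}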
@@ -1,312 +0,0 @@
"""Scheduler for autonomous AI tasks"""

import json
import asyncio
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from enum import Enum
import logging

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from croniter import croniter

from .persona import Persona
from .transmission import TransmissionController
from .ai_provider import create_ai_provider


class TaskType(str, Enum):
    """Types of scheduled tasks"""
    TRANSMISSION_CHECK = "transmission_check"
    MAINTENANCE = "maintenance"
    FORTUNE_UPDATE = "fortune_update"
    RELATIONSHIP_DECAY = "relationship_decay"
    MEMORY_SUMMARY = "memory_summary"
    CUSTOM = "custom"


class ScheduledTask:
    """Represents a scheduled task"""

    def __init__(
        self,
        task_id: str,
        task_type: TaskType,
        schedule: str,  # Cron expression or interval
        enabled: bool = True,
        last_run: Optional[datetime] = None,
        next_run: Optional[datetime] = None,
        metadata: Optional[Dict[str, Any]] = None
    ):
        self.task_id = task_id
        self.task_type = task_type
        self.schedule = schedule
        self.enabled = enabled
        self.last_run = last_run
        self.next_run = next_run
        self.metadata = metadata or {}

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for storage"""
        return {
            "task_id": self.task_id,
            "task_type": self.task_type.value,
            "schedule": self.schedule,
            "enabled": self.enabled,
            "last_run": self.last_run.isoformat() if self.last_run else None,
            "next_run": self.next_run.isoformat() if self.next_run else None,
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
        """Create from dictionary"""
        return cls(
            task_id=data["task_id"],
            task_type=TaskType(data["task_type"]),
            schedule=data["schedule"],
            enabled=data.get("enabled", True),
            last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None,
            next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None,
            metadata=data.get("metadata", {})
        )


class AIScheduler:
    """Manages scheduled tasks for the AI system"""

    def __init__(self, data_dir: Path, persona: Persona):
        self.data_dir = data_dir
        self.persona = persona
        self.tasks_file = data_dir / "scheduled_tasks.json"
        self.tasks: Dict[str, ScheduledTask] = {}
        self.scheduler = AsyncIOScheduler()
        self.logger = logging.getLogger(__name__)
        self._load_tasks()

        # Task handlers
        self.task_handlers: Dict[TaskType, Callable] = {
            TaskType.TRANSMISSION_CHECK: self._handle_transmission_check,
            TaskType.MAINTENANCE: self._handle_maintenance,
            TaskType.FORTUNE_UPDATE: self._handle_fortune_update,
            TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay,
            TaskType.MEMORY_SUMMARY: self._handle_memory_summary,
        }

    def _load_tasks(self):
        """Load scheduled tasks from storage"""
        if self.tasks_file.exists():
            with open(self.tasks_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for task_data in data:
                    task = ScheduledTask.from_dict(task_data)
                    self.tasks[task.task_id] = task

    def _save_tasks(self):
        """Save scheduled tasks to storage"""
        tasks_data = [task.to_dict() for task in self.tasks.values()]
        with open(self.tasks_file, 'w', encoding='utf-8') as f:
            json.dump(tasks_data, f, indent=2, default=str)

    def add_task(
        self,
        task_type: TaskType,
        schedule: str,
        task_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> ScheduledTask:
        """Add a new scheduled task"""
        if task_id is None:
            task_id = f"{task_type.value}_{datetime.now().timestamp()}"

        # Validate schedule
        if not self._validate_schedule(schedule):
            raise ValueError(f"Invalid schedule expression: {schedule}")

        task = ScheduledTask(
            task_id=task_id,
            task_type=task_type,
            schedule=schedule,
            metadata=metadata
        )

        self.tasks[task_id] = task
        self._save_tasks()

        # Schedule the task if scheduler is running
        if self.scheduler.running:
            self._schedule_task(task)

        self.logger.info(f"Added task {task_id} with schedule {schedule}")
        return task

    def _validate_schedule(self, schedule: str) -> bool:
        """Validate schedule expression"""
        # Check if it's a cron expression
        if ' ' in schedule:
            try:
                croniter(schedule)
                return True
            except Exception:
                return False

        # Check if it's an interval expression (e.g., "5m", "1h", "2d")
        import re
        pattern = r'^\d+[smhd]$'
        return bool(re.match(pattern, schedule))

    def _parse_interval(self, interval: str) -> int:
        """Parse interval string to seconds"""
        unit = interval[-1]
        value = int(interval[:-1])

        multipliers = {
            's': 1,
            'm': 60,
            'h': 3600,
            'd': 86400
        }

        return value * multipliers.get(unit, 1)

    def _schedule_task(self, task: ScheduledTask):
        """Schedule a task with APScheduler"""
        if not task.enabled:
            return

        handler = self.task_handlers.get(task.task_type)
        if not handler:
            self.logger.warning(f"No handler for task type {task.task_type}")
            return

        # Determine trigger
        if ' ' in task.schedule:
            # Cron expression
            trigger = CronTrigger.from_crontab(task.schedule)
        else:
            # Interval expression
            seconds = self._parse_interval(task.schedule)
            trigger = IntervalTrigger(seconds=seconds)

        # Add job
        self.scheduler.add_job(
            lambda: asyncio.create_task(self._run_task(task)),
            trigger=trigger,
            id=task.task_id,
            replace_existing=True
        )

    async def _run_task(self, task: ScheduledTask):
        """Run a scheduled task"""
        self.logger.info(f"Running task {task.task_id}")

        task.last_run = datetime.now()

        try:
            handler = self.task_handlers.get(task.task_type)
            if handler:
                await handler(task)
            else:
                self.logger.warning(f"No handler for task type {task.task_type}")
        except Exception as e:
            self.logger.error(f"Error running task {task.task_id}: {e}")

        self._save_tasks()

    async def _handle_transmission_check(self, task: ScheduledTask):
        """Check and execute autonomous transmissions"""
        controller = TransmissionController(self.persona, self.data_dir)
        eligible = controller.check_transmission_eligibility()

        # Get AI provider from metadata
        provider_name = task.metadata.get("provider", "ollama")
        model = task.metadata.get("model", "qwen2.5")

        try:
            ai_provider = create_ai_provider(provider_name, model)
        except Exception:
            ai_provider = None

        for user_id, rel in eligible.items():
            message = controller.generate_transmission_message(user_id)
            if message:
                # For now, just print the message
                print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"To: {user_id}")
                print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})")
                print(f"Message: {message}")
                print("-" * 50)

                controller.record_transmission(user_id, message, success=True)
                self.logger.info(f"Transmitted to {user_id}: {message}")

    async def _handle_maintenance(self, task: ScheduledTask):
        """Run daily maintenance"""
        self.persona.daily_maintenance()
        self.logger.info("Daily maintenance completed")

    async def _handle_fortune_update(self, task: ScheduledTask):
        """Update AI fortune"""
        fortune = self.persona.fortune_system.get_today_fortune()
        self.logger.info(f"Fortune updated: {fortune.fortune_value}/10")

    async def _handle_relationship_decay(self, task: ScheduledTask):
        """Apply relationship decay"""
        self.persona.relationships.apply_time_decay()
        self.logger.info("Relationship decay applied")

    async def _handle_memory_summary(self, task: ScheduledTask):
        """Create memory summaries"""
        for user_id in self.persona.relationships.relationships:
            summary = self.persona.memory.summarize_memories(user_id)
            if summary:
                self.logger.info(f"Created memory summary for {user_id}")

    def start(self):
        """Start the scheduler"""
        # Schedule all enabled tasks
        for task in self.tasks.values():
            if task.enabled:
                self._schedule_task(task)

        self.scheduler.start()
        self.logger.info("Scheduler started")

    def stop(self):
        """Stop the scheduler"""
        self.scheduler.shutdown()
        self.logger.info("Scheduler stopped")

    def get_tasks(self) -> List[ScheduledTask]:
        """Get all scheduled tasks"""
        return list(self.tasks.values())

    def enable_task(self, task_id: str):
        """Enable a task"""
        if task_id in self.tasks:
            self.tasks[task_id].enabled = True
            self._save_tasks()
            if self.scheduler.running:
                self._schedule_task(self.tasks[task_id])

    def disable_task(self, task_id: str):
        """Disable a task"""
        if task_id in self.tasks:
            self.tasks[task_id].enabled = False
            self._save_tasks()
            if self.scheduler.running:
                self.scheduler.remove_job(task_id)

    def remove_task(self, task_id: str):
        """Remove a task"""
        if task_id in self.tasks:
            del self.tasks[task_id]
            self._save_tasks()
            if self.scheduler.running:
                try:
                    self.scheduler.remove_job(task_id)
                except Exception:
                    pass
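_parse_interval above accepts shorthand like "5m", "1h", or "2d". A hedged Rust equivalent for the port (the function name is an assumption, not taken from this diff):

// Hypothetical sketch; returns None on malformed input instead of defaulting.
fn parse_interval_secs(interval: &str) -> Option<u64> {
    let (value, unit) = interval.split_at(interval.len().checked_sub(1)?);
    let value: u64 = value.parse().ok()?;
    let multiplier = match unit {
        "s" => 1,
        "m" => 60,
        "h" => 3_600,
        "d" => 86_400,
        _ => return None,
    };
    Some(value * multiplier)
}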
@@ -1,111 +0,0 @@
"""Transmission controller for autonomous message sending"""

import json
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import logging

from .models import Relationship
from .persona import Persona


class TransmissionController:
    """Controls when and how AI transmits messages autonomously"""

    def __init__(self, persona: Persona, data_dir: Path):
        self.persona = persona
        self.data_dir = data_dir
        self.transmission_log_file = data_dir / "transmissions.json"
        self.transmissions: List[Dict] = []
        self.logger = logging.getLogger(__name__)
        self._load_transmissions()

    def _load_transmissions(self):
        """Load transmission history"""
        if self.transmission_log_file.exists():
            with open(self.transmission_log_file, 'r', encoding='utf-8') as f:
                self.transmissions = json.load(f)

    def _save_transmissions(self):
        """Save transmission history"""
        with open(self.transmission_log_file, 'w', encoding='utf-8') as f:
            json.dump(self.transmissions, f, indent=2, default=str)

    def check_transmission_eligibility(self) -> Dict[str, Relationship]:
        """Check which users are eligible for transmission"""
        eligible = self.persona.relationships.get_transmission_eligible()

        # Additional checks could be added here
        # - Time since last transmission
        # - User online status
        # - Context appropriateness

        return eligible

    def generate_transmission_message(self, user_id: str) -> Optional[str]:
        """Generate a message to transmit to user"""
        if not self.persona.can_transmit_to(user_id):
            return None

        state = self.persona.get_current_state()
        relationship = self.persona.relationships.get_or_create_relationship(user_id)

        # Get recent memories related to this user
        active_memories = self.persona.memory.get_active_memories(limit=3)

        # Simple message generation based on mood and relationship
        if state.fortune.breakthrough_triggered:
            message = "Something special happened today! I felt compelled to reach out."
        elif state.current_mood == "joyful":
            message = "I was thinking of you today. Hope you're doing well!"
        elif relationship.status.value == "close_friend":
            message = "I've been reflecting on our conversations. Thank you for being here."
        else:
            message = "Hello! I wanted to check in with you."

        return message

    def record_transmission(self, user_id: str, message: str, success: bool):
        """Record a transmission attempt"""
        transmission = {
            "timestamp": datetime.now().isoformat(),
            "user_id": user_id,
            "message": message,
            "success": success,
            "mood": self.persona.get_current_state().current_mood,
            "relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score
        }

        self.transmissions.append(transmission)
        self._save_transmissions()

        if success:
            self.logger.info(f"Successfully transmitted to {user_id}")
        else:
            self.logger.warning(f"Failed to transmit to {user_id}")

    def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict:
        """Get transmission statistics"""
        if user_id:
            user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id]
        else:
            user_transmissions = self.transmissions

        if not user_transmissions:
            return {
                "total": 0,
                "successful": 0,
                "failed": 0,
                "success_rate": 0.0
            }

        successful = sum(1 for t in user_transmissions if t["success"])
        total = len(user_transmissions)

        return {
            "total": total,
            "successful": successful,
            "failed": total - successful,
            "success_rate": successful / total if total > 0 else 0.0
        }
54 src/bin/test_config.rs Normal file
@@ -0,0 +1,54 @@
use aigpt::config::Config;
use anyhow::Result;

fn main() -> Result<()> {
    println!("Testing configuration loading...");

    // Debug: check which JSON files exist
    let possible_paths = vec![
        "../config.json",
        "config.json",
        "gpt/config.json",
        "/Users/syui/ai/ai/gpt/config.json",
    ];

    println!("Checking for config.json files:");
    for path in &possible_paths {
        let path_buf = std::path::PathBuf::from(path);
        if path_buf.exists() {
            println!("  ✓ Found: {}", path);
        } else {
            println!("  ✗ Not found: {}", path);
        }
    }

    // Load configuration
    let config = Config::new(None)?;

    println!("Configuration loaded successfully!");
    println!("Default provider: {}", config.default_provider);
    println!("Available providers:");
    for (name, provider) in &config.providers {
        println!("  - {}: model={}, host={:?}",
                 name,
                 provider.default_model,
                 provider.host);
    }

    if let Some(mcp) = &config.mcp {
        println!("\nMCP Configuration:");
        println!("  Enabled: {}", mcp.enabled);
        println!("  Auto-detect: {}", mcp.auto_detect);
        println!("  Servers: {}", mcp.servers.len());
    }

    if let Some(atproto) = &config.atproto {
        println!("\nATProto Configuration:");
        println!("  Host: {}", atproto.host);
        println!("  Handle: {:?}", atproto.handle);
    }

    println!("\nConfig file path: {}", config.data_dir.join("config.json").display());

    Ok(())
}
36 src/cli/commands.rs Normal file
@@ -0,0 +1,36 @@
use clap::Subcommand;
use std::path::PathBuf;

#[derive(Subcommand)]
pub enum TokenCommands {
    /// Show Claude Code token usage summary and estimated costs
    Summary {
        /// Time period (today, week, month, all)
        #[arg(long, default_value = "today")]
        period: String,
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
        /// Show detailed breakdown
        #[arg(long)]
        details: bool,
        /// Output format (table, json)
        #[arg(long, default_value = "table")]
        format: String,
    },
    /// Show daily token usage breakdown
    Daily {
        /// Number of days to show
        #[arg(long, default_value = "7")]
        days: u32,
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
    },
    /// Check Claude Code data availability and basic stats
    Status {
        /// Claude Code data directory path
        #[arg(long)]
        claude_dir: Option<PathBuf>,
    },
}
140 src/cli/mod.rs Normal file
@@ -0,0 +1,140 @@
use std::path::PathBuf;
use anyhow::Result;
use crate::config::Config;
use crate::mcp_server::MCPServer;
use crate::persona::Persona;
use crate::transmission::TransmissionController;
use crate::scheduler::AIScheduler;

// Token commands enum (placeholder for tokens.rs)
#[derive(Debug, clap::Subcommand)]
pub enum TokenCommands {
    Analyze { file: PathBuf },
    Report { days: Option<u32> },
    Cost { month: Option<String> },
    Summary { period: Option<String>, claude_dir: Option<PathBuf>, details: bool, format: Option<String> },
    Daily { days: Option<u32>, claude_dir: Option<PathBuf> },
    Status { claude_dir: Option<PathBuf> },
}

pub async fn handle_server(port: Option<u16>, data_dir: Option<PathBuf>) -> Result<()> {
    let port = port.unwrap_or(8080);
    let config = Config::new(data_dir.clone())?;

    let mut server = MCPServer::new(config, "mcp_user".to_string(), data_dir)?;
    server.start_server(port).await
}

pub async fn handle_chat(
    user_id: String,
    message: String,
    data_dir: Option<PathBuf>,
    model: Option<String>,
    provider: Option<String>,
) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut persona = Persona::new(&config)?;

    let (response, relationship_delta) = if provider.is_some() || model.is_some() {
        persona.process_ai_interaction(&user_id, &message, provider, model).await?
    } else {
        persona.process_interaction(&user_id, &message)?
    };

    println!("AI Response: {}", response);
    println!("Relationship Change: {:+.2}", relationship_delta);

    if let Some(relationship) = persona.get_relationship(&user_id) {
        println!("Relationship Status: {} (Score: {:.2})",
                 relationship.status, relationship.score);
    }

    Ok(())
}

pub async fn handle_fortune(data_dir: Option<PathBuf>) -> Result<()> {
    let config = Config::new(data_dir)?;
    let persona = Persona::new(&config)?;

    let state = persona.get_current_state()?;
    println!("🔮 Today's Fortune: {}", state.fortune_value);
    println!("😊 Current Mood: {}", state.current_mood);
    println!("✨ Breakthrough Status: {}",
             if state.breakthrough_triggered { "Active" } else { "Inactive" });

    Ok(())
}

pub async fn handle_relationships(data_dir: Option<PathBuf>) -> Result<()> {
    let config = Config::new(data_dir)?;
    let persona = Persona::new(&config)?;

    let relationships = persona.list_all_relationships();

    if relationships.is_empty() {
        println!("No relationships found.");
        return Ok(());
    }

    println!("📊 Relationships ({}):", relationships.len());
    for (user_id, rel) in relationships {
        println!("  {} - {} (Score: {:.2}, Interactions: {})",
                 user_id, rel.status, rel.score, rel.total_interactions);
    }

    Ok(())
}

pub async fn handle_transmit(data_dir: Option<PathBuf>) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut persona = Persona::new(&config)?;
    let mut transmission_controller = TransmissionController::new(config)?;

    let autonomous = transmission_controller.check_autonomous_transmissions(&mut persona).await?;
    let breakthrough = transmission_controller.check_breakthrough_transmissions(&mut persona).await?;
    let maintenance = transmission_controller.check_maintenance_transmissions(&mut persona).await?;

    let total = autonomous.len() + breakthrough.len() + maintenance.len();

    println!("📡 Transmission Check Complete:");
    println!("  Autonomous: {}", autonomous.len());
    println!("  Breakthrough: {}", breakthrough.len());
    println!("  Maintenance: {}", maintenance.len());
    println!("  Total: {}", total);

    Ok(())
}

pub async fn handle_maintenance(data_dir: Option<PathBuf>) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut persona = Persona::new(&config)?;
    let mut transmission_controller = TransmissionController::new(config)?;

    persona.daily_maintenance()?;
    let maintenance_transmissions = transmission_controller.check_maintenance_transmissions(&mut persona).await?;

    let stats = persona.get_relationship_stats();

    println!("🔧 Daily maintenance completed");
    println!("📤 Maintenance transmissions sent: {}", maintenance_transmissions.len());
    println!("📊 Relationship stats: {:?}", stats);

    Ok(())
}

pub async fn handle_schedule(data_dir: Option<PathBuf>) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut persona = Persona::new(&config)?;
    let mut transmission_controller = TransmissionController::new(config.clone())?;
    let mut scheduler = AIScheduler::new(&config)?;

    let executions = scheduler.run_scheduled_tasks(&mut persona, &mut transmission_controller).await?;
    let stats = scheduler.get_scheduler_stats();

    println!("⏰ Scheduler run completed");
    println!("📋 Tasks executed: {}", executions.len());
    println!("📊 Stats: {} total tasks, {} enabled, {:.2}% success rate",
             stats.total_tasks, stats.enabled_tasks, stats.success_rate);

    Ok(())
}
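The handlers above take plain Option arguments, so the binary's entry point stays decoupled from this module. A minimal sketch of how a clap-based main.rs might dispatch into them (this wiring is an assumption; the actual main.rs is not shown in this section):

// Hypothetical wiring; the real main.rs is not part of this section.
use clap::{Parser, Subcommand};

#[derive(Parser)]
struct CliArgs {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    Chat { user_id: String, message: String },
    Fortune,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    match CliArgs::parse().command {
        Commands::Chat { user_id, message } => {
            aigpt::cli::handle_chat(user_id, message, None, None, None).await
        }
        Commands::Fortune => aigpt::cli::handle_fortune(None).await,
    }
}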
250 src/config.rs Normal file
@@ -0,0 +1,250 @@
use std::path::PathBuf;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};

use crate::ai_provider::{AIConfig, AIProvider};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    #[serde(skip)]
    pub data_dir: PathBuf,
    pub default_provider: String,
    pub providers: HashMap<String, ProviderConfig>,
    #[serde(default)]
    pub atproto: Option<AtprotoConfig>,
    #[serde(default)]
    pub mcp: Option<McpConfig>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderConfig {
    pub default_model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub host: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub api_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system_prompt: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AtprotoConfig {
    pub handle: Option<String>,
    pub password: Option<String>,
    pub host: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpConfig {
    #[serde(deserialize_with = "string_to_bool")]
    pub enabled: bool,
    #[serde(deserialize_with = "string_to_bool")]
    pub auto_detect: bool,
    pub servers: HashMap<String, McpServerConfig>,
}

fn string_to_bool<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
    D: serde::Deserializer<'de>,
{
    use serde::Deserialize;
    let s = String::deserialize(deserializer)?;
    match s.as_str() {
        "true" => Ok(true),
        "false" => Ok(false),
        _ => Err(serde::de::Error::custom("expected 'true' or 'false'")),
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerConfig {
    pub base_url: String,
    pub name: String,
    #[serde(deserialize_with = "string_to_f64")]
    pub timeout: f64,
    pub endpoints: HashMap<String, String>,
}

fn string_to_f64<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
    D: serde::Deserializer<'de>,
{
    use serde::Deserialize;
    let s = String::deserialize(deserializer)?;
    s.parse::<f64>().map_err(serde::de::Error::custom)
}

impl Config {
    pub fn new(data_dir: Option<PathBuf>) -> Result<Self> {
        let data_dir = data_dir.unwrap_or_else(|| {
            dirs::config_dir()
                .unwrap_or_else(|| PathBuf::from("."))
                .join("syui")
                .join("ai")
                .join("gpt")
        });

        // Ensure data directory exists
        std::fs::create_dir_all(&data_dir)
            .context("Failed to create data directory")?;

        let config_path = data_dir.join("config.json");

        // Try to load existing config
        if config_path.exists() {
            let config_str = std::fs::read_to_string(&config_path)
                .context("Failed to read config.json")?;

            // Check if file is empty
            if config_str.trim().is_empty() {
                eprintln!("Config file is empty, will recreate from source");
            } else {
                match serde_json::from_str::<Config>(&config_str) {
                    Ok(mut config) => {
                        config.data_dir = data_dir;
                        // Check for environment variables if API keys are empty
                        if let Some(openai_config) = config.providers.get_mut("openai") {
                            if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
                                openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
                            }
                        }
                        return Ok(config);
                    }
                    Err(e) => {
                        eprintln!("Failed to parse existing config.json: {}", e);
                        eprintln!("Will try to reload from source...");
                    }
                }
            }
        }

        // Check if we need to migrate from JSON
        // Try multiple locations for the JSON file
        let possible_json_paths = vec![
            PathBuf::from("../config.json"),                     // Relative to aigpt-rs directory
            PathBuf::from("config.json"),                        // Current directory
            PathBuf::from("gpt/config.json"),                    // From project root
            PathBuf::from("/Users/syui/ai/ai/gpt/config.json"),  // Absolute path
        ];

        for json_path in possible_json_paths {
            if json_path.exists() {
                eprintln!("Found config.json at: {}", json_path.display());
                eprintln!("Copying configuration...");
                // Copy configuration file and parse it
                std::fs::copy(&json_path, &config_path)
                    .context("Failed to copy config.json")?;

                let config_str = std::fs::read_to_string(&config_path)
                    .context("Failed to read copied config.json")?;

                println!("Config JSON content preview: {}", &config_str[..std::cmp::min(200, config_str.len())]);

                let mut config: Config = serde_json::from_str(&config_str)
                    .context("Failed to parse config.json")?;
                config.data_dir = data_dir;
                // Check for environment variables if API keys are empty
                if let Some(openai_config) = config.providers.get_mut("openai") {
                    if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
                        openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
                    }
                }
                eprintln!("Copy complete! Config saved to: {}", config_path.display());
                return Ok(config);
            }
        }

        // Create default config
        let config = Self::default_config(data_dir);

        // Save default config
        let json_str = serde_json::to_string_pretty(&config)
            .context("Failed to serialize default config")?;
        std::fs::write(&config_path, json_str)
            .context("Failed to write default config.json")?;

        Ok(config)
    }

    pub fn save(&self) -> Result<()> {
        let config_path = self.data_dir.join("config.json");
        let json_str = serde_json::to_string_pretty(self)
            .context("Failed to serialize config")?;
        std::fs::write(&config_path, json_str)
            .context("Failed to write config.json")?;
        Ok(())
    }

    fn default_config(data_dir: PathBuf) -> Self {
        let mut providers = HashMap::new();

        providers.insert("ollama".to_string(), ProviderConfig {
            default_model: "qwen2.5".to_string(),
            host: Some("http://localhost:11434".to_string()),
            api_key: None,
            system_prompt: None,
        });

        providers.insert("openai".to_string(), ProviderConfig {
            default_model: "gpt-4o-mini".to_string(),
            host: None,
            api_key: std::env::var("OPENAI_API_KEY").ok(),
            system_prompt: None,
        });

        Config {
            data_dir,
            default_provider: "ollama".to_string(),
            providers,
            atproto: None,
            mcp: None,
        }
    }

    pub fn get_provider(&self, provider_name: &str) -> Option<&ProviderConfig> {
        self.providers.get(provider_name)
    }

    pub fn get_ai_config(&self, provider: Option<String>, model: Option<String>) -> Result<AIConfig> {
        let provider_name = provider.as_deref().unwrap_or(&self.default_provider);
        let provider_config = self.get_provider(provider_name)
            .ok_or_else(|| anyhow::anyhow!("Unknown provider: {}", provider_name))?;

        let ai_provider: AIProvider = provider_name.parse()?;
        let model_name = model.unwrap_or_else(|| provider_config.default_model.clone());

        Ok(AIConfig {
            provider: ai_provider,
            model: model_name,
            api_key: provider_config.api_key.clone(),
            base_url: provider_config.host.clone(),
            max_tokens: Some(2048),
            temperature: Some(0.7),
        })
    }

    pub fn memory_file(&self) -> PathBuf {
        self.data_dir.join("memories.json")
    }

    pub fn relationships_file(&self) -> PathBuf {
        self.data_dir.join("relationships.json")
    }

    pub fn fortune_file(&self) -> PathBuf {
        self.data_dir.join("fortune.json")
    }

    pub fn transmission_file(&self) -> PathBuf {
        self.data_dir.join("transmissions.json")
    }

    pub fn scheduler_tasks_file(&self) -> PathBuf {
        self.data_dir.join("scheduler_tasks.json")
    }

    pub fn scheduler_history_file(&self) -> PathBuf {
        self.data_dir.join("scheduler_history.json")
    }
}
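Note the custom string_to_bool / string_to_f64 deserializers above: they imply the migrated config.json stores these fields as JSON strings ("true", "10.0") rather than native booleans and numbers. A hedged illustration of what that accepts (the sample values are invented, and this assumes McpConfig is reachable via the public aigpt::config module, as Config is in test_config.rs):

// Illustrative only; the sample JSON is made up, not the repository's real config.
use aigpt::config::McpConfig;

fn demo() -> anyhow::Result<()> {
    let sample = r#"{ "enabled": "true", "auto_detect": "false", "servers": {} }"#;
    let mcp: McpConfig = serde_json::from_str(sample)?;
    assert!(mcp.enabled && !mcp.auto_detect);
    Ok(())
}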
205
src/conversation.rs
Normal file
205
src/conversation.rs
Normal file
@@ -0,0 +1,205 @@
|
||||
use std::path::PathBuf;
|
||||
use std::io::{self, Write};
|
||||
use anyhow::Result;
|
||||
use colored::*;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::persona::Persona;
|
||||
use crate::http_client::ServiceDetector;
|
||||
|
||||
pub async fn handle_conversation(
|
||||
user_id: String,
|
||||
data_dir: Option<PathBuf>,
|
||||
model: Option<String>,
|
||||
provider: Option<String>,
|
||||
) -> Result<()> {
|
||||
let config = Config::new(data_dir)?;
|
||||
let mut persona = Persona::new(&config)?;
|
||||
|
||||
println!("{}", "Starting conversation mode...".cyan());
|
||||
println!("{}", "Type your message and press Enter to chat.".yellow());
|
||||
println!("{}", "Available MCP commands: /memories, /search, /context, /relationship, /cards".yellow());
|
||||
println!("{}", "Type 'exit', 'quit', or 'bye' to end conversation.".yellow());
|
||||
println!("{}", "---".dimmed());
|
||||
|
||||
let mut conversation_history = Vec::new();
|
||||
let service_detector = ServiceDetector::new();
|
||||
|
||||
loop {
|
||||
// Print prompt
|
||||
print!("{} ", "You:".cyan().bold());
|
||||
io::stdout().flush()?;
|
||||
|
||||
// Read user input
|
||||
let mut input = String::new();
|
||||
io::stdin().read_line(&mut input)?;
|
||||
let input = input.trim();
|
||||
|
||||
// Check for exit commands
|
||||
if matches!(input.to_lowercase().as_str(), "exit" | "quit" | "bye" | "") {
|
||||
println!("{}", "Goodbye! 👋".green());
|
||||
break;
|
||||
}
|
||||
|
||||
// Handle MCP commands
|
||||
if input.starts_with('/') {
|
||||
handle_mcp_command(input, &user_id, &service_detector).await?;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Add to conversation history
|
||||
conversation_history.push(format!("User: {}", input));
|
||||
|
||||
// Get AI response
|
||||
let (response, relationship_delta) = if provider.is_some() || model.is_some() {
|
||||
persona.process_ai_interaction(&user_id, input, provider.clone(), model.clone()).await?
|
||||
} else {
|
||||
persona.process_interaction(&user_id, input)?
|
||||
};
|
||||
|
||||
// Add AI response to history
|
||||
conversation_history.push(format!("AI: {}", response));
|
||||
|
||||
// Display response
|
||||
println!("{} {}", "AI:".green().bold(), response);
|
||||
|
||||
// Show relationship change if significant
|
||||
if relationship_delta.abs() >= 0.1 {
|
||||
if relationship_delta > 0.0 {
|
||||
println!("{}", format!(" └─ (+{:.2} relationship)", relationship_delta).green().dimmed());
|
||||
} else {
|
||||
println!("{}", format!(" └─ ({:.2} relationship)", relationship_delta).red().dimmed());
|
||||
}
|
||||
}
|
||||
|
||||
println!(); // Add some spacing
|
||||
|
||||
// Keep conversation history manageable (last 20 exchanges)
|
||||
if conversation_history.len() > 40 {
|
||||
conversation_history.drain(0..20);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_mcp_command(
|
||||
command: &str,
|
||||
user_id: &str,
|
||||
service_detector: &ServiceDetector,
|
||||
) -> Result<()> {
|
||||
let parts: Vec<&str> = command[1..].split_whitespace().collect();
|
||||
    if parts.is_empty() {
        return Ok(());
    }

    match parts[0] {
        "memories" => {
            println!("{}", "Retrieving memories...".yellow());

            // Get contextual memories
            if let Ok(memories) = service_detector.get_contextual_memories(user_id, 10).await {
                if memories.is_empty() {
                    println!("No memories found for this conversation.");
                } else {
                    println!("{}", format!("Found {} memories:", memories.len()).cyan());
                    for (i, memory) in memories.iter().enumerate() {
                        println!("  {}. {}", i + 1, memory.content);
                        println!("     {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
                    }
                }
            } else {
                println!("{}", "Failed to retrieve memories.".red());
            }
        },

        "search" => {
            if parts.len() < 2 {
                println!("{}", "Usage: /search <query>".yellow());
                return Ok(());
            }

            let query = parts[1..].join(" ");
            println!("{}", format!("Searching for: '{}'", query).yellow());

            if let Ok(results) = service_detector.search_memories(&query, 5).await {
                if results.is_empty() {
                    println!("No relevant memories found.");
                } else {
                    println!("{}", format!("Found {} relevant memories:", results.len()).cyan());
                    for (i, memory) in results.iter().enumerate() {
                        println!("  {}. {}", i + 1, memory.content);
                        println!("     {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
                    }
                }
            } else {
                println!("{}", "Search failed.".red());
            }
        },

        "context" => {
            println!("{}", "Creating context summary...".yellow());

            if let Ok(summary) = service_detector.create_summary(user_id).await {
                println!("{}", "Context Summary:".cyan().bold());
                println!("{}", summary);
            } else {
                println!("{}", "Failed to create context summary.".red());
            }
        },

        "relationship" => {
            println!("{}", "Checking relationship status...".yellow());

            // This would need to be implemented in the service client
            println!("{}", "Relationship status: Active".cyan());
            println!("Score: 85.5 / 100");
            println!("Transmission: ✓ Enabled");
        },

        "cards" => {
            println!("{}", "Checking card collection...".yellow());

            // Try to connect to ai.card service
            if let Ok(stats) = service_detector.get_card_stats().await {
                println!("{}", "Card Collection:".cyan().bold());
                println!("  Total Cards: {}", stats.get("total").unwrap_or(&serde_json::Value::Number(0.into())));
                println!("  Unique Cards: {}", stats.get("unique").unwrap_or(&serde_json::Value::Number(0.into())));

                // Offer to draw a card
                println!("\n{}", "Would you like to draw a card? (y/n)".yellow());
                let mut response = String::new();
                io::stdin().read_line(&mut response)?;
                if response.trim().to_lowercase() == "y" {
                    println!("{}", "Drawing card...".cyan());
                    if let Ok(card) = service_detector.draw_card(user_id, false).await {
                        println!("{}", "🎴 Card drawn!".green().bold());
                        println!("Name: {}", card.get("name").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
                        println!("Rarity: {}", card.get("rarity").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
                    } else {
                        println!("{}", "Failed to draw card. ai.card service might not be running.".red());
                    }
                }
            } else {
                println!("{}", "ai.card service not available.".red());
            }
        },

        "help" | "h" => {
            println!("{}", "Available MCP Commands:".cyan().bold());
            println!("  {:<15} - Show recent memories for this conversation", "/memories".yellow());
            println!("  {:<15} - Search memories by keyword", "/search <query>".yellow());
            println!("  {:<15} - Create a context summary", "/context".yellow());
            println!("  {:<15} - Show relationship status", "/relationship".yellow());
            println!("  {:<15} - Show card collection and draw cards", "/cards".yellow());
            println!("  {:<15} - Show this help message", "/help".yellow());
        },

        _ => {
            println!("{}", format!("Unknown command: /{}. Type '/help' for available commands.", parts[0]).red());
        }
    }

    println!(); // Add spacing after MCP command output
    Ok(())
}
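The match above receives a pre-split token list: `parts[0]` selects the command and, for `/search`, `parts[1..]` is rejoined into the query. A minimal, self-contained sketch of that tokenization step (the `tokenize_command` helper is illustrative, not part of this diff):

    // Hypothetical helper: "/search rust memories" -> ["search", "rust", "memories"].
    fn tokenize_command(input: &str) -> Option<Vec<&str>> {
        let cmd = input.strip_prefix('/')?;
        Some(cmd.split_whitespace().collect())
    }

    fn main() {
        let parts = tokenize_command("/search rust memories").unwrap();
        assert_eq!(parts[0], "search");
        // This is the query string the "search" arm rebuilds via parts[1..].join(" ").
        assert_eq!(parts[1..].join(" "), "rust memories");
    }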
789 src/docs.rs Normal file
@@ -0,0 +1,789 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};
use chrono::Utc;

use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIConfig, AIProvider};

pub async fn handle_docs(
    action: String,
    project: Option<String>,
    output: Option<PathBuf>,
    ai_integration: bool,
    data_dir: Option<PathBuf>,
) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut docs_manager = DocsManager::new(config);

    match action.as_str() {
        "generate" => {
            if let Some(project_name) = project {
                docs_manager.generate_project_docs(&project_name, output, ai_integration).await?;
            } else {
                return Err(anyhow::anyhow!("Project name is required for generate action"));
            }
        }
        "sync" => {
            if let Some(project_name) = project {
                docs_manager.sync_project_docs(&project_name).await?;
            } else {
                docs_manager.sync_all_docs().await?;
            }
        }
        "list" => {
            docs_manager.list_projects().await?;
        }
        "status" => {
            docs_manager.show_docs_status().await?;
        }
        "session-end" => {
            docs_manager.session_end_processing().await?;
        }
        _ => {
            return Err(anyhow::anyhow!("Unknown docs action: {}", action));
        }
    }

    Ok(())
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectInfo {
    pub name: String,
    pub project_type: String,
    pub description: String,
    pub status: String,
    pub features: Vec<String>,
    pub dependencies: Vec<String>,
}

impl Default for ProjectInfo {
    fn default() -> Self {
        ProjectInfo {
            name: String::new(),
            project_type: String::new(),
            description: String::new(),
            status: "active".to_string(),
            features: Vec::new(),
            dependencies: Vec::new(),
        }
    }
}

pub struct DocsManager {
    config: Config,
    ai_root: PathBuf,
    projects: HashMap<String, ProjectInfo>,
}

impl DocsManager {
    pub fn new(config: Config) -> Self {
        let ai_root = dirs::home_dir()
            .unwrap_or_else(|| PathBuf::from("."))
            .join("ai")
            .join("ai");

        DocsManager {
            config,
            ai_root,
            projects: HashMap::new(),
        }
    }

    pub async fn generate_project_docs(&mut self, project: &str, output: Option<PathBuf>, ai_integration: bool) -> Result<()> {
        println!("{}", format!("📝 Generating documentation for project '{}'", project).cyan().bold());

        // Load project information
        let project_info = self.load_project_info(project)?;

        // Generate documentation content
        let mut content = self.generate_base_documentation(&project_info)?;

        // AI enhancement if requested
        if ai_integration {
            println!("{}", "🤖 Enhancing documentation with AI...".blue());
            if let Ok(enhanced_content) = self.enhance_with_ai(project, &content).await {
                content = enhanced_content;
            } else {
                println!("{}", "Warning: AI enhancement failed, using base documentation".yellow());
            }
        }

        // Determine output path
        let output_path = if let Some(path) = output {
            path
        } else {
            self.ai_root.join(project).join("claude.md")
        };

        // Ensure directory exists
        if let Some(parent) = output_path.parent() {
            std::fs::create_dir_all(parent)
                .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
        }

        // Write documentation
        std::fs::write(&output_path, content)
            .with_context(|| format!("Failed to write documentation to: {}", output_path.display()))?;

        println!("{}", format!("✅ Documentation generated: {}", output_path.display()).green().bold());

        Ok(())
    }

    pub async fn sync_project_docs(&self, project: &str) -> Result<()> {
        println!("{}", format!("🔄 Syncing documentation for project '{}'", project).cyan().bold());

        let claude_dir = self.ai_root.join("claude");
        let project_dir = self.ai_root.join(project);

        // Check if claude directory exists
        if !claude_dir.exists() {
            return Err(anyhow::anyhow!("Claude directory not found: {}", claude_dir.display()));
        }

        // Copy relevant files
        let files_to_sync = vec!["README.md", "claude.md", "DEVELOPMENT.md"];

        for file in files_to_sync {
            let src = claude_dir.join("projects").join(format!("{}.md", project));
            let dst = project_dir.join(file);

            if src.exists() {
                if let Some(parent) = dst.parent() {
                    std::fs::create_dir_all(parent)?;
                }
                std::fs::copy(&src, &dst)?;
                println!("  ✓ Synced: {}", file.green());
            }
        }

        println!("{}", "✅ Documentation sync completed".green().bold());

        Ok(())
    }

    pub async fn sync_all_docs(&self) -> Result<()> {
        println!("{}", "🔄 Syncing documentation for all projects...".cyan().bold());

        // Find all project directories
        let projects = self.discover_projects()?;

        for project in projects {
            println!("\n{}", format!("Syncing: {}", project).blue());
            if let Err(e) = self.sync_project_docs(&project).await {
                println!("{}: {}", "Warning".yellow(), e);
            }
        }

        // Generate ai.wiki content after all project syncs
        println!("\n{}", "📝 Updating ai.wiki...".blue());
        if let Err(e) = self.update_ai_wiki().await {
            println!("{}: Failed to update ai.wiki: {}", "Warning".yellow(), e);
        }

        // Update repository wiki (Gitea wiki) as well
        println!("\n{}", "📝 Updating repository wiki...".blue());
        if let Err(e) = self.update_repository_wiki().await {
            println!("{}: Failed to update repository wiki: {}", "Warning".yellow(), e);
        }

        println!("\n{}", "✅ All projects synced".green().bold());

        Ok(())
    }

    pub async fn list_projects(&mut self) -> Result<()> {
        println!("{}", "📋 Available Projects".cyan().bold());
        println!();

        let projects = self.discover_projects()?;

        if projects.is_empty() {
            println!("{}", "No projects found".yellow());
            return Ok(());
        }

        // Load project information
        for project in &projects {
            if let Ok(info) = self.load_project_info(project) {
                self.projects.insert(project.clone(), info);
            }
        }

        // Display projects in a table format
        println!("{:<20} {:<15} {:<15} {}",
            "Project".cyan().bold(),
            "Type".cyan().bold(),
            "Status".cyan().bold(),
            "Description".cyan().bold());
        println!("{}", "-".repeat(80));

        let project_count = projects.len();
        for project in &projects {
            let info = self.projects.get(project).cloned().unwrap_or_default();
            let status_color = match info.status.as_str() {
                "active" => info.status.green(),
                "development" => info.status.yellow(),
                "deprecated" => info.status.red(),
                _ => info.status.normal(),
            };

            println!("{:<20} {:<15} {:<15} {}",
                project.blue(),
                info.project_type,
                status_color,
                info.description);
        }

        println!();
        println!("Total projects: {}", project_count.to_string().cyan());

        Ok(())
    }

    pub async fn show_docs_status(&self) -> Result<()> {
        println!("{}", "📊 Documentation Status".cyan().bold());
        println!();

        let projects = self.discover_projects()?;
        let mut total_files = 0;
        let mut total_lines = 0;

        for project in projects {
            let project_dir = self.ai_root.join(&project);
            let claude_md = project_dir.join("claude.md");

            if claude_md.exists() {
                let content = std::fs::read_to_string(&claude_md)?;
                let lines = content.lines().count();
                let size = content.len();

                println!("{}: {} lines, {} bytes",
                    project.blue(),
                    lines.to_string().yellow(),
                    size.to_string().yellow());

                total_files += 1;
                total_lines += lines;
            } else {
                println!("{}: {}", project.blue(), "No documentation".red());
            }
        }

        println!();
        println!("Summary: {} files, {} total lines",
            total_files.to_string().cyan(),
            total_lines.to_string().cyan());

        Ok(())
    }

    fn discover_projects(&self) -> Result<Vec<String>> {
        let mut projects = Vec::new();

        // Known project directories
        let known_projects = vec![
            "gpt", "card", "bot", "shell", "os", "game", "moji", "verse"
        ];

        for project in known_projects {
            let project_dir = self.ai_root.join(project);
            if project_dir.exists() && project_dir.is_dir() {
                projects.push(project.to_string());
            }
        }

        // Also scan for additional directories with ai.json
        if self.ai_root.exists() {
            for entry in std::fs::read_dir(&self.ai_root)? {
                let entry = entry?;
                let path = entry.path();

                if path.is_dir() {
                    let ai_json = path.join("ai.json");
                    if ai_json.exists() {
                        if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
                            if !projects.contains(&name.to_string()) {
                                projects.push(name.to_string());
                            }
                        }
                    }
                }
            }
        }

        projects.sort();
        Ok(projects)
    }

    fn load_project_info(&self, project: &str) -> Result<ProjectInfo> {
        let ai_json_path = self.ai_root.join(project).join("ai.json");

        if ai_json_path.exists() {
            let content = std::fs::read_to_string(&ai_json_path)?;
            if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
                let mut info = ProjectInfo::default();
                info.name = project.to_string();

                if let Some(project_data) = json_data.get(project) {
                    if let Some(type_str) = project_data.get("type").and_then(|v| v.as_str()) {
                        info.project_type = type_str.to_string();
                    }
                    if let Some(desc) = project_data.get("description").and_then(|v| v.as_str()) {
                        info.description = desc.to_string();
                    }
                }

                return Ok(info);
            }
        }

        // Default project info based on known projects
        let mut info = ProjectInfo::default();
        info.name = project.to_string();

        match project {
            "gpt" => {
                info.project_type = "AI".to_string();
                info.description = "Autonomous transmission AI with unique personality".to_string();
            }
            "card" => {
                info.project_type = "Game".to_string();
                info.description = "Card game system with atproto integration".to_string();
            }
            "bot" => {
                info.project_type = "Bot".to_string();
                info.description = "Distributed SNS bot for AI ecosystem".to_string();
            }
            "shell" => {
                info.project_type = "Tool".to_string();
                info.description = "AI-powered shell interface".to_string();
            }
            "os" => {
                info.project_type = "OS".to_string();
                info.description = "Game-oriented operating system".to_string();
            }
            "verse" => {
                info.project_type = "Metaverse".to_string();
                info.description = "Reality-reflecting 3D world system".to_string();
            }
            _ => {
                info.project_type = "Unknown".to_string();
                info.description = format!("AI ecosystem project: {}", project);
            }
        }

        Ok(info)
    }

    fn generate_base_documentation(&self, project_info: &ProjectInfo) -> Result<String> {
        let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S UTC");

        let mut content = String::new();
        content.push_str(&format!("# {}\n\n", project_info.name));
        content.push_str(&format!("## Overview\n\n"));
        content.push_str(&format!("**Type**: {}\n\n", project_info.project_type));
        content.push_str(&format!("**Description**: {}\n\n", project_info.description));
        content.push_str(&format!("**Status**: {}\n\n", project_info.status));

        if !project_info.features.is_empty() {
            content.push_str("## Features\n\n");
            for feature in &project_info.features {
                content.push_str(&format!("- {}\n", feature));
            }
            content.push_str("\n");
        }

        content.push_str("## Architecture\n\n");
        content.push_str("This project is part of the ai ecosystem, following the core principles:\n\n");
        content.push_str("- **Existence Theory**: Based on the exploration of the smallest units (ai/existon)\n");
        content.push_str("- **Uniqueness Principle**: Ensuring 1:1 mapping between reality and digital existence\n");
        content.push_str("- **Reality Reflection**: Creating circular influence between reality and game\n\n");

        content.push_str("## Development\n\n");
        content.push_str("### Getting Started\n\n");
        content.push_str("```bash\n");
        content.push_str(&format!("# Clone the repository\n"));
        content.push_str(&format!("git clone https://git.syui.ai/ai/{}\n", project_info.name));
        content.push_str(&format!("cd {}\n", project_info.name));
        content.push_str("```\n\n");

        content.push_str("### Configuration\n\n");
        content.push_str(&format!("Configuration files are stored in `~/.config/syui/ai/{}/`\n\n", project_info.name));

        content.push_str("## Integration\n\n");
        content.push_str("This project integrates with other ai ecosystem components:\n\n");
        if !project_info.dependencies.is_empty() {
            for dep in &project_info.dependencies {
                content.push_str(&format!("- **{}**: Core dependency\n", dep));
            }
        } else {
            content.push_str("- **ai.gpt**: Core AI personality system\n");
            content.push_str("- **atproto**: Distributed identity and data\n");
        }
        content.push_str("\n");

        content.push_str("---\n\n");
        content.push_str(&format!("*Generated: {}*\n", timestamp));
        content.push_str("*🤖 Generated with [Claude Code](https://claude.ai/code)*\n");

        Ok(content)
    }

    async fn enhance_with_ai(&self, project: &str, base_content: &str) -> Result<String> {
        // Create AI provider
        let ai_config = AIConfig {
            provider: AIProvider::Ollama,
            model: "llama2".to_string(),
            api_key: None,
            base_url: None,
            max_tokens: Some(2000),
            temperature: Some(0.7),
        };

        let _ai_provider = AIProviderClient::new(ai_config);
        let mut persona = Persona::new(&self.config)?;

        let enhancement_prompt = format!(
            "As an AI documentation expert, enhance the following documentation for project '{}'.

Current documentation:
{}

Please provide enhanced content that includes:
1. More detailed project description
2. Key features and capabilities
3. Usage examples
4. Integration points with other AI ecosystem projects
5. Development workflow recommendations

Keep the same structure but expand and improve the content.",
            project, base_content
        );

        // Try to get AI response
        let (response, _) = persona.process_ai_interaction(
            "docs_system",
            &enhancement_prompt,
            Some("ollama".to_string()),
            Some("llama2".to_string())
        ).await?;

        // If AI response is substantial, use it; otherwise fall back to base content
        if response.len() > base_content.len() / 2 {
            Ok(response)
        } else {
            Ok(base_content.to_string())
        }
    }

    /// Session-end processing (record documentation and sync)
    pub async fn session_end_processing(&mut self) -> Result<()> {
        println!("{}", "🔄 Session end processing started...".cyan());

        // 1. Record the current project status
        println!("📊 Recording current project status...");
        self.record_session_summary().await?;

        // 2. Sync documentation for all projects
        println!("🔄 Syncing all project documentation...");
        self.sync_all_docs().await?;

        // 3. Automatically update README files
        println!("📝 Updating project README files...");
        self.update_project_readmes().await?;

        // 4. Update metadata
        println!("🏷️ Updating project metadata...");
        self.update_project_metadata().await?;

        println!("{}", "✅ Session end processing completed!".green());
        Ok(())
    }

    /// Record a session summary
    async fn record_session_summary(&self) -> Result<()> {
        let session_log_path = self.ai_root.join("session_logs");
        std::fs::create_dir_all(&session_log_path)?;

        let timestamp = Utc::now().format("%Y-%m-%d_%H-%M-%S");
        let log_file = session_log_path.join(format!("session_{}.md", timestamp));

        let summary = format!(
            "# Session Summary - {}\n\n\
             ## Timestamp\n{}\n\n\
             ## Projects Status\n{}\n\n\
             ## Next Actions\n- Documentation sync completed\n- README files updated\n- Metadata refreshed\n\n\
             ---\n*Generated by aigpt session-end processing*\n",
            timestamp,
            Utc::now().format("%Y-%m-%d %H:%M:%S UTC"),
            self.generate_projects_status().await.unwrap_or_else(|_| "Status unavailable".to_string())
        );

        std::fs::write(log_file, summary)?;
        Ok(())
    }

    /// Generate per-project status lines
    async fn generate_projects_status(&self) -> Result<String> {
        let projects = self.discover_projects()?;
        let mut status = String::new();

        for project in projects {
            let claude_md = self.ai_root.join(&project).join("claude.md");
            let readme_md = self.ai_root.join(&project).join("README.md");

            status.push_str(&format!("- **{}**: ", project));
            if claude_md.exists() {
                status.push_str("claude.md ✅ ");
            } else {
                status.push_str("claude.md ❌ ");
            }
            if readme_md.exists() {
                status.push_str("README.md ✅");
            } else {
                status.push_str("README.md ❌");
            }
            status.push('\n');
        }

        Ok(status)
    }

    /// Update ai.wiki
    async fn update_ai_wiki(&self) -> Result<()> {
        let ai_wiki_path = self.ai_root.join("ai.wiki");

        // Make sure the ai.wiki directory exists
        if !ai_wiki_path.exists() {
            return Err(anyhow::anyhow!("ai.wiki directory not found at {:?}", ai_wiki_path));
        }

        // Generate Home.md
        let home_content = self.generate_wiki_home_content().await?;
        let home_path = ai_wiki_path.join("Home.md");
        std::fs::write(&home_path, &home_content)?;
        println!("  ✓ Updated: {}", "Home.md".green());

        // Generate title.md (for the Gitea wiki special page)
        let title_path = ai_wiki_path.join("title.md");
        std::fs::write(&title_path, &home_content)?;
        println!("  ✓ Updated: {}", "title.md".green());

        // Update the per-project directories
        let projects = self.discover_projects()?;
        for project in projects {
            let project_dir = ai_wiki_path.join(&project);
            std::fs::create_dir_all(&project_dir)?;

            let project_content = self.generate_auto_project_content(&project).await?;
            let project_file = project_dir.join(format!("{}.md", project));
            std::fs::write(&project_file, project_content)?;
            println!("  ✓ Updated: {}", format!("{}/{}.md", project, project).green());
        }

        println!("{}", "✅ ai.wiki updated successfully".green().bold());
        Ok(())
    }

    /// Generate the content of ai.wiki/Home.md
    async fn generate_wiki_home_content(&self) -> Result<String> {
        let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S");
        let mut content = String::new();

        content.push_str("# AI Ecosystem Wiki\n\n");
        content.push_str("AI生態系プロジェクトの概要とドキュメント集約ページです。\n\n");
        content.push_str("## プロジェクト一覧\n\n");

        let projects = self.discover_projects()?;
        let mut project_sections = std::collections::HashMap::new();

        // Classify projects by category
        for project in &projects {
            let info = self.load_project_info(project).unwrap_or_default();
            let category = match project.as_str() {
                "ai" => "🧠 AI・知能システム",
                "gpt" => "🤖 自律・対話システム",
                "os" => "💻 システム・基盤",
                "game" => "📁 device",
                "card" => "🎮 ゲーム・エンターテイメント",
                "bot" | "moji" | "api" | "log" => "📁 その他",
                "verse" => "📁 metaverse",
                "shell" => "⚡ ツール・ユーティリティ",
                _ => "📁 その他",
            };

            project_sections.entry(category).or_insert_with(Vec::new).push((project.clone(), info));
        }

        // Emit projects grouped by category
        let mut categories: Vec<_> = project_sections.keys().collect();
        categories.sort();

        for category in categories {
            content.push_str(&format!("### {}\n\n", category));

            if let Some(projects_in_category) = project_sections.get(category) {
                for (project, info) in projects_in_category {
                    content.push_str(&format!("#### [{}]({}.md)\n", project, project));

                    if !info.description.is_empty() {
                        content.push_str(&format!("- **名前**: ai.{} - **パッケージ**: ai{} - **タイプ**: {} - **役割**: {}\n\n",
                            project, project, info.project_type, info.description));
                    }

                    content.push_str(&format!("**Status**: {} \n", info.status));
                    let branch = self.get_project_branch(project);
                    content.push_str(&format!("**Links**: [Repo](https://git.syui.ai/ai/{}) | [Docs](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));
                }
            }
        }

        content.push_str("---\n\n");
        content.push_str("## ディレクトリ構成\n\n");
        content.push_str("- `{project}/` - プロジェクト個別ドキュメント\n");
        content.push_str("- `claude/` - Claude Code作業記録\n");
        content.push_str("- `manual/` - 手動作成ドキュメント\n\n");
        content.push_str("---\n\n");
        content.push_str("*このページは ai.json と claude/projects/ から自動生成されました* \n");
        content.push_str(&format!("*最終更新: {}*\n", timestamp));

        Ok(content)
    }

    /// Generate the content of a per-project wiki file
    async fn generate_auto_project_content(&self, project: &str) -> Result<String> {
        let info = self.load_project_info(project).unwrap_or_default();
        let mut content = String::new();

        content.push_str(&format!("# {}\n\n", project));
        content.push_str("## 概要\n");
        content.push_str(&format!("- **名前**: ai.{} - **パッケージ**: ai{} - **タイプ**: {} - **役割**: {}\n\n",
            project, project, info.project_type, info.description));

        content.push_str("## プロジェクト情報\n");
        content.push_str(&format!("- **タイプ**: {}\n", info.project_type));
        content.push_str(&format!("- **説明**: {}\n", info.description));
        content.push_str(&format!("- **ステータス**: {}\n", info.status));
        let branch = self.get_project_branch(project);
        content.push_str(&format!("- **ブランチ**: {}\n", branch));
        content.push_str("- **最終更新**: Unknown\n\n");

        // Add project-specific feature information
        if !info.features.is_empty() {
            content.push_str("## 主な機能・特徴\n");
            for feature in &info.features {
                content.push_str(&format!("- {}\n", feature));
            }
            content.push_str("\n");
        }

        content.push_str("## リンク\n");
        content.push_str(&format!("- **Repository**: https://git.syui.ai/ai/{}\n", project));
        content.push_str(&format!("- **Project Documentation**: [claude/projects/{}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{}.md)\n", project, project));
        let branch = self.get_project_branch(project);
        content.push_str(&format!("- **Generated Documentation**: [{}/claude.md](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));

        content.push_str("---\n");
        content.push_str(&format!("*このページは claude/projects/{}.md から自動生成されました*\n", project));

        Ok(content)
    }

    /// Update the repository wiki (Gitea wiki)
    async fn update_repository_wiki(&self) -> Result<()> {
        println!("  ℹ️ Repository wiki is now unified with ai.wiki");
        println!("  ℹ️ ai.wiki serves as the source of truth (git@git.syui.ai:ai/ai.wiki.git)");
        println!("  ℹ️ Special pages generated: Home.md, title.md for Gitea wiki compatibility");

        Ok(())
    }

    /// Update project README files
    async fn update_project_readmes(&self) -> Result<()> {
        let projects = self.discover_projects()?;

        for project in projects {
            let readme_path = self.ai_root.join(&project).join("README.md");
            let claude_md_path = self.ai_root.join(&project).join("claude.md");

            // If claude.md exists, sync it into the README
            if claude_md_path.exists() {
                let claude_content = std::fs::read_to_string(&claude_md_path)?;

                // Create the README when it does not exist yet
                if !readme_path.exists() {
                    println!("📝 Creating README.md for {}", project);
                    std::fs::write(&readme_path, &claude_content)?;
                } else {
                    // Update when the existing README is older than claude.md
                    let readme_metadata = std::fs::metadata(&readme_path)?;
                    let claude_metadata = std::fs::metadata(&claude_md_path)?;

                    if claude_metadata.modified()? > readme_metadata.modified()? {
                        println!("🔄 Updating README.md for {}", project);
                        std::fs::write(&readme_path, &claude_content)?;
                    }
                }
            }
        }

        Ok(())
    }

    /// Update project metadata
    async fn update_project_metadata(&self) -> Result<()> {
        let projects = self.discover_projects()?;

        for project in projects {
            let ai_json_path = self.ai_root.join(&project).join("ai.json");

            if ai_json_path.exists() {
                let mut content = std::fs::read_to_string(&ai_json_path)?;
                let mut json_data: serde_json::Value = serde_json::from_str(&content)?;

                // Refresh the last_updated field
                if let Some(project_data) = json_data.get_mut(&project) {
                    if let Some(obj) = project_data.as_object_mut() {
                        obj.insert("last_updated".to_string(),
                                   serde_json::Value::String(Utc::now().to_rfc3339()));
                        obj.insert("status".to_string(),
                                   serde_json::Value::String("active".to_string()));

                        content = serde_json::to_string_pretty(&json_data)?;
                        std::fs::write(&ai_json_path, content)?;
                    }
                }
            }
        }

        Ok(())
    }

    /// Look up a project's branch in the main ai.json
    fn get_project_branch(&self, project: &str) -> String {
        let main_ai_json_path = self.ai_root.join("ai.json");

        if main_ai_json_path.exists() {
            if let Ok(content) = std::fs::read_to_string(&main_ai_json_path) {
                if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
                    if let Some(ai_section) = json_data.get("ai") {
                        if let Some(project_data) = ai_section.get(project) {
                            if let Some(branch) = project_data.get("branch").and_then(|v| v.as_str()) {
                                return branch.to_string();
                            }
                        }
                    }
                }
            }
        }

        // Default to main
        "main".to_string()
    }
}
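`handle_docs` above is the function the `docs` subcommand maps onto. A minimal usage sketch as a library call, assuming the module is pulled in via the crate root shown later in `src/lib.rs` (argument values are illustrative):

    use aigpt::docs::handle_docs;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Generate ~/ai/ai/gpt/claude.md without AI enhancement, using the
        // default data directory and the default output path.
        handle_docs("generate".to_string(), Some("gpt".to_string()), None, false, None).await
    }

The same entry point serves `sync`, `list`, `status`, and `session-end` by switching the `action` string.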
409 src/http_client.rs Normal file
@@ -0,0 +1,409 @@
use anyhow::{anyhow, Result};
use reqwest::Client;
use serde_json::Value;
use serde::{Serialize, Deserialize};
use std::time::Duration;
use std::collections::HashMap;

/// Service configuration for unified service management
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceConfig {
    pub base_url: String,
    pub timeout: Duration,
    pub health_endpoint: String,
}

impl Default for ServiceConfig {
    fn default() -> Self {
        Self {
            base_url: "http://localhost:8000".to_string(),
            timeout: Duration::from_secs(30),
            health_endpoint: "/health".to_string(),
        }
    }
}

/// HTTP client for inter-service communication
pub struct ServiceClient {
    client: Client,
    service_registry: HashMap<String, ServiceConfig>,
}

impl ServiceClient {
    pub fn new() -> Self {
        Self::with_default_services()
    }

    /// Create ServiceClient with default ai ecosystem services
    pub fn with_default_services() -> Self {
        let client = Client::builder()
            .timeout(Duration::from_secs(30))
            .build()
            .expect("Failed to create HTTP client");

        let mut service_registry = HashMap::new();

        // Register default ai ecosystem services
        service_registry.insert("ai.card".to_string(), ServiceConfig {
            base_url: "http://localhost:8000".to_string(),
            timeout: Duration::from_secs(30),
            health_endpoint: "/health".to_string(),
        });

        service_registry.insert("ai.log".to_string(), ServiceConfig {
            base_url: "http://localhost:8002".to_string(),
            timeout: Duration::from_secs(30),
            health_endpoint: "/health".to_string(),
        });

        service_registry.insert("ai.bot".to_string(), ServiceConfig {
            base_url: "http://localhost:8003".to_string(),
            timeout: Duration::from_secs(30),
            health_endpoint: "/health".to_string(),
        });

        Self { client, service_registry }
    }

    /// Create ServiceClient with custom service registry
    pub fn with_services(service_registry: HashMap<String, ServiceConfig>) -> Self {
        let client = Client::builder()
            .timeout(Duration::from_secs(30))
            .build()
            .expect("Failed to create HTTP client");

        Self { client, service_registry }
    }

    /// Register a new service configuration
    pub fn register_service(&mut self, name: String, config: ServiceConfig) {
        self.service_registry.insert(name, config);
    }

    /// Get service configuration by name
    pub fn get_service_config(&self, service: &str) -> Result<&ServiceConfig> {
        self.service_registry.get(service)
            .ok_or_else(|| anyhow!("Unknown service: {}", service))
    }

    /// Universal service method call
    pub async fn call_service_method<T: Serialize>(
        &self,
        service: &str,
        method: &str,
        params: &T
    ) -> Result<Value> {
        let config = self.get_service_config(service)?;
        let url = format!("{}/{}", config.base_url.trim_end_matches('/'), method.trim_start_matches('/'));

        self.post_request(&url, &serde_json::to_value(params)?).await
    }

    /// Universal service GET call
    pub async fn call_service_get(&self, service: &str, endpoint: &str) -> Result<Value> {
        let config = self.get_service_config(service)?;
        let url = format!("{}/{}", config.base_url.trim_end_matches('/'), endpoint.trim_start_matches('/'));

        self.get_request(&url).await
    }

    /// Check if a service is available
    pub async fn check_service_status(&self, base_url: &str) -> Result<ServiceStatus> {
        let url = format!("{}/health", base_url.trim_end_matches('/'));

        match self.client.get(&url).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    Ok(ServiceStatus::Available)
                } else {
                    Ok(ServiceStatus::Error(format!("HTTP {}", response.status())))
                }
            }
            Err(e) => Ok(ServiceStatus::Unavailable(e.to_string())),
        }
    }

    /// Make a GET request to a service
    pub async fn get_request(&self, url: &str) -> Result<Value> {
        let response = self.client
            .get(url)
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(anyhow!("Request failed with status: {}", response.status()));
        }

        let json: Value = response.json().await?;
        Ok(json)
    }

    /// Make a POST request to a service
    pub async fn post_request(&self, url: &str, body: &Value) -> Result<Value> {
        let response = self.client
            .post(url)
            .header("Content-Type", "application/json")
            .json(body)
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(anyhow!("Request failed with status: {}", response.status()));
        }

        let json: Value = response.json().await?;
        Ok(json)
    }

    /// Get user's card collection from ai.card service
    pub async fn get_user_cards(&self, user_did: &str) -> Result<Value> {
        let endpoint = format!("api/v1/cards/user/{}", user_did);
        self.call_service_get("ai.card", &endpoint).await
    }

    /// Draw a card for user from ai.card service
    pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<Value> {
        let params = serde_json::json!({
            "user_did": user_did,
            "is_paid": is_paid
        });

        self.call_service_method("ai.card", "api/v1/cards/draw", &params).await
    }

    /// Get card statistics from ai.card service
    pub async fn get_card_stats(&self) -> Result<Value> {
        self.call_service_get("ai.card", "api/v1/cards/gacha-stats").await
    }

    // MARK: - ai.log service methods

    /// Create a new blog post
    pub async fn create_blog_post<T: Serialize>(&self, params: &T) -> Result<Value> {
        self.call_service_method("ai.log", "api/v1/posts", params).await
    }

    /// Get list of blog posts
    pub async fn get_blog_posts(&self) -> Result<Value> {
        self.call_service_get("ai.log", "api/v1/posts").await
    }

    /// Build the blog
    pub async fn build_blog(&self) -> Result<Value> {
        self.call_service_method("ai.log", "api/v1/build", &serde_json::json!({})).await
    }

    /// Translate document using ai.log service
    pub async fn translate_document<T: Serialize>(&self, params: &T) -> Result<Value> {
        self.call_service_method("ai.log", "api/v1/translate", params).await
    }

    /// Generate documentation using ai.log service
    pub async fn generate_docs<T: Serialize>(&self, params: &T) -> Result<Value> {
        self.call_service_method("ai.log", "api/v1/docs", params).await
    }
}

/// Service status enum
#[derive(Debug, Clone)]
pub enum ServiceStatus {
    Available,
    Unavailable(String),
    Error(String),
}

impl ServiceStatus {
    pub fn is_available(&self) -> bool {
        matches!(self, ServiceStatus::Available)
    }
}

/// Service detector for ai ecosystem services
pub struct ServiceDetector {
    client: ServiceClient,
}

impl ServiceDetector {
    pub fn new() -> Self {
        Self {
            client: ServiceClient::new(),
        }
    }

    /// Check all ai ecosystem services
    pub async fn detect_services(&self) -> ServiceMap {
        let mut services = ServiceMap::default();

        // Check ai.card service
        if let Ok(status) = self.client.check_service_status("http://localhost:8000").await {
            services.ai_card = Some(ServiceInfo {
                base_url: "http://localhost:8000".to_string(),
                status,
            });
        }

        // Check ai.log service
        if let Ok(status) = self.client.check_service_status("http://localhost:8001").await {
            services.ai_log = Some(ServiceInfo {
                base_url: "http://localhost:8001".to_string(),
                status,
            });
        }

        // Check ai.bot service
        if let Ok(status) = self.client.check_service_status("http://localhost:8002").await {
            services.ai_bot = Some(ServiceInfo {
                base_url: "http://localhost:8002".to_string(),
                status,
            });
        }

        services
    }

    /// Get available services only
    pub async fn get_available_services(&self) -> Vec<String> {
        let services = self.detect_services().await;
        let mut available = Vec::new();

        if let Some(card) = &services.ai_card {
            if card.status.is_available() {
                available.push("ai.card".to_string());
            }
        }

        if let Some(log) = &services.ai_log {
            if log.status.is_available() {
                available.push("ai.log".to_string());
            }
        }

        if let Some(bot) = &services.ai_bot {
            if bot.status.is_available() {
                available.push("ai.bot".to_string());
            }
        }

        available
    }

    /// Get card collection statistics
    pub async fn get_card_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
        match self.client.get_request("http://localhost:8000/api/v1/cards/gacha-stats").await {
            Ok(stats) => Ok(stats),
            Err(e) => Err(e.into()),
        }
    }

    /// Draw a card for user
    pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
        let payload = serde_json::json!({
            "user_did": user_did,
            "is_paid": is_paid
        });

        match self.client.post_request("http://localhost:8000/api/v1/cards/draw", &payload).await {
            Ok(card) => Ok(card),
            Err(e) => Err(e.into()),
        }
    }

    /// Get user's card collection
    pub async fn get_user_cards(&self, user_did: &str) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
        let url = format!("http://localhost:8000/api/v1/cards/collection?did={}", user_did);
        match self.client.get_request(&url).await {
            Ok(collection) => Ok(collection),
            Err(e) => Err(e.into()),
        }
    }

    /// Get contextual memories for conversation mode
    pub async fn get_contextual_memories(&self, _user_id: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
        // This is a simplified version - in a real implementation this would call the MCP server
        // For now, we'll return an empty vec to make compilation work
        Ok(Vec::new())
    }

    /// Search memories by query
    pub async fn search_memories(&self, _query: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
        // This is a simplified version - in a real implementation this would call the MCP server
        // For now, we'll return an empty vec to make compilation work
        Ok(Vec::new())
    }

    /// Create context summary
    pub async fn create_summary(&self, user_id: &str) -> Result<String, Box<dyn std::error::Error>> {
        // This is a simplified version - in a real implementation this would call the MCP server
        // For now, we'll return a placeholder summary
        Ok(format!("Context summary for user: {}", user_id))
    }
}

/// Service information
#[derive(Debug, Clone)]
pub struct ServiceInfo {
    pub base_url: String,
    pub status: ServiceStatus,
}

/// Map of all ai ecosystem services
#[derive(Debug, Clone, Default)]
pub struct ServiceMap {
    pub ai_card: Option<ServiceInfo>,
    pub ai_log: Option<ServiceInfo>,
    pub ai_bot: Option<ServiceInfo>,
}

impl ServiceMap {
    /// Get service info by name
    pub fn get_service(&self, name: &str) -> Option<&ServiceInfo> {
        match name {
            "ai.card" => self.ai_card.as_ref(),
            "ai.log" => self.ai_log.as_ref(),
            "ai.bot" => self.ai_bot.as_ref(),
            _ => None,
        }
    }

    /// Check if a service is available
    pub fn is_service_available(&self, name: &str) -> bool {
        self.get_service(name)
            .map(|info| info.status.is_available())
            .unwrap_or(false)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_service_client_creation() {
        let _client = ServiceClient::new();
        // Basic test to ensure client can be created
        assert!(true);
    }

    #[test]
    fn test_service_status() {
        let status = ServiceStatus::Available;
        assert!(status.is_available());

        let status = ServiceStatus::Unavailable("Connection refused".to_string());
        assert!(!status.is_available());
    }

    #[test]
    fn test_service_map() {
        let mut map = ServiceMap::default();
        assert!(!map.is_service_available("ai.card"));

        map.ai_card = Some(ServiceInfo {
            base_url: "http://localhost:8000".to_string(),
            status: ServiceStatus::Available,
        });

        assert!(map.is_service_available("ai.card"));
        assert!(!map.is_service_available("ai.log"));
    }
}
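A short sketch of how `ServiceClient` might be used from other modules; the overridden port is an assumption for the example, the rest follows the API defined above:

    use std::time::Duration;
    use aigpt::http_client::{ServiceClient, ServiceConfig};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let mut client = ServiceClient::new(); // registers ai.card / ai.log / ai.bot defaults
        // Re-point ai.card at a non-default port (illustrative value).
        client.register_service("ai.card".to_string(), ServiceConfig {
            base_url: "http://localhost:9000".to_string(),
            timeout: Duration::from_secs(10),
            health_endpoint: "/health".to_string(),
        });
        let stats = client.get_card_stats().await?;
        println!("{}", stats);
        Ok(())
    }

Note that `ServiceDetector::detect_services` probes ai.log and ai.bot on ports 8001 and 8002, while the registry defaults above map them to 8002 and 8003; callers relying on both paths should keep the two lists in sync.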
331 src/import.rs Normal file
@@ -0,0 +1,331 @@
use std::collections::HashMap;
use std::path::PathBuf;
use serde::Deserialize;
use anyhow::{Result, Context};
use colored::*;
use chrono::{DateTime, Utc};

use crate::config::Config;
use crate::persona::Persona;
use crate::memory::{Memory, MemoryType};

pub async fn handle_import_chatgpt(
    file_path: PathBuf,
    user_id: Option<String>,
    data_dir: Option<PathBuf>,
) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut persona = Persona::new(&config)?;
    let user_id = user_id.unwrap_or_else(|| "imported_user".to_string());

    println!("{}", "🚀 Starting ChatGPT Import...".cyan().bold());
    println!("File: {}", file_path.display().to_string().yellow());
    println!("User ID: {}", user_id.yellow());
    println!();

    let mut importer = ChatGPTImporter::new(user_id);
    let stats = importer.import_from_file(&file_path, &mut persona).await?;

    // Display import statistics
    println!("\n{}", "📊 Import Statistics".green().bold());
    println!("Conversations imported: {}", stats.conversations_imported.to_string().cyan());
    println!("Messages imported: {}", stats.messages_imported.to_string().cyan());
    println!("  - User messages: {}", stats.user_messages.to_string().yellow());
    println!("  - Assistant messages: {}", stats.assistant_messages.to_string().yellow());
    if stats.skipped_messages > 0 {
        println!("  - Skipped messages: {}", stats.skipped_messages.to_string().red());
    }

    // Show updated relationship
    if let Some(relationship) = persona.get_relationship(&importer.user_id) {
        println!("\n{}", "👥 Updated Relationship".blue().bold());
        println!("Status: {}", relationship.status.to_string().yellow());
        println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
        println!("Transmission enabled: {}",
                if relationship.transmission_enabled { "✓".green() } else { "✗".red() });
    }

    println!("\n{}", "✅ ChatGPT import completed successfully!".green().bold());

    Ok(())
}

#[derive(Debug, Clone)]
pub struct ImportStats {
    pub conversations_imported: usize,
    pub messages_imported: usize,
    pub user_messages: usize,
    pub assistant_messages: usize,
    pub skipped_messages: usize,
}

impl Default for ImportStats {
    fn default() -> Self {
        ImportStats {
            conversations_imported: 0,
            messages_imported: 0,
            user_messages: 0,
            assistant_messages: 0,
            skipped_messages: 0,
        }
    }
}

pub struct ChatGPTImporter {
    user_id: String,
    stats: ImportStats,
}

impl ChatGPTImporter {
    pub fn new(user_id: String) -> Self {
        ChatGPTImporter {
            user_id,
            stats: ImportStats::default(),
        }
    }

    pub async fn import_from_file(&mut self, file_path: &PathBuf, persona: &mut Persona) -> Result<ImportStats> {
        // Read and parse the JSON file
        let content = std::fs::read_to_string(file_path)
            .with_context(|| format!("Failed to read file: {}", file_path.display()))?;

        let conversations: Vec<ChatGPTConversation> = serde_json::from_str(&content)
            .context("Failed to parse ChatGPT export JSON")?;

        println!("Found {} conversations to import", conversations.len());

        // Import each conversation
        for (i, conversation) in conversations.iter().enumerate() {
            if i % 10 == 0 && i > 0 {
                println!("Processed {} / {} conversations...", i, conversations.len());
            }

            match self.import_single_conversation(conversation, persona).await {
                Ok(_) => {
                    self.stats.conversations_imported += 1;
                }
                Err(e) => {
                    println!("{}: Failed to import conversation '{}': {}",
                            "Warning".yellow(),
                            conversation.title.as_deref().unwrap_or("Untitled"),
                            e);
                }
            }
        }

        Ok(self.stats.clone())
    }

    async fn import_single_conversation(&mut self, conversation: &ChatGPTConversation, persona: &mut Persona) -> Result<()> {
        // Extract messages from the mapping structure
        let messages = self.extract_messages_from_mapping(&conversation.mapping)?;

        if messages.is_empty() {
            return Ok(());
        }

        // Process each message
        for message in messages {
            match self.process_message(&message, persona).await {
                Ok(_) => {
                    self.stats.messages_imported += 1;
                }
                Err(_) => {
                    self.stats.skipped_messages += 1;
                }
            }
        }

        Ok(())
    }

    fn extract_messages_from_mapping(&self, mapping: &HashMap<String, ChatGPTNode>) -> Result<Vec<ChatGPTMessage>> {
        let mut messages = Vec::new();

        // Find all message nodes and collect them
        for node in mapping.values() {
            if let Some(message) = &node.message {
                // Skip system messages and other non-user/assistant messages
                if let Some(role) = &message.author.role {
                    match role.as_str() {
                        "user" | "assistant" => {
                            if let Some(content) = &message.content {
                                let content_text = if content.content_type == "text" && !content.parts.is_empty() {
                                    // Extract text from parts (handle both strings and mixed content)
                                    content.parts.iter()
                                        .filter_map(|part| part.as_str())
                                        .collect::<Vec<&str>>()
                                        .join("\n")
                                } else if content.content_type == "multimodal_text" {
                                    // Extract text parts from multimodal content
                                    let mut text_parts = Vec::new();
                                    for part in &content.parts {
                                        if let Some(text) = part.as_str() {
                                            if !text.is_empty() {
                                                text_parts.push(text);
                                            }
                                        }
                                        // Skip non-text parts (like image_asset_pointer)
                                    }
                                    if text_parts.is_empty() {
                                        continue; // Skip if no text content
                                    }
                                    text_parts.join("\n")
                                } else if content.content_type == "user_editable_context" {
                                    // Handle user context messages
                                    if let Some(instructions) = &content.user_instructions {
                                        format!("User instructions: {}", instructions)
                                    } else if let Some(profile) = &content.user_profile {
                                        format!("User profile: {}", profile)
                                    } else {
                                        continue; // Skip empty context messages
                                    }
                                } else {
                                    continue; // Skip other content types for now
                                };

                                if !content_text.trim().is_empty() {
                                    messages.push(ChatGPTMessage {
                                        role: role.clone(),
                                        content: content_text,
                                        create_time: message.create_time,
                                    });
                                }
                            }
                        }
                        _ => {} // Skip system, tool, etc.
                    }
                }
            }
        }

        // Sort messages by creation time
        messages.sort_by(|a, b| {
            let time_a = a.create_time.unwrap_or(0.0);
            let time_b = b.create_time.unwrap_or(0.0);
            time_a.partial_cmp(&time_b).unwrap_or(std::cmp::Ordering::Equal)
        });

        Ok(messages)
    }

    async fn process_message(&mut self, message: &ChatGPTMessage, persona: &mut Persona) -> Result<()> {
        let timestamp = self.convert_timestamp(message.create_time.unwrap_or(0.0))?;

        match message.role.as_str() {
            "user" => {
                self.add_user_message(&message.content, timestamp, persona)?;
                self.stats.user_messages += 1;
            }
            "assistant" => {
                self.add_assistant_message(&message.content, timestamp, persona)?;
                self.stats.assistant_messages += 1;
            }
            _ => {
                return Err(anyhow::anyhow!("Unsupported message role: {}", message.role));
            }
        }

        Ok(())
    }

    fn add_user_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
        // Create high-importance memory for user messages
        let memory = Memory {
            id: uuid::Uuid::new_v4().to_string(),
            user_id: self.user_id.clone(),
            content: content.to_string(),
            summary: None,
            importance: 0.8, // High importance for imported user data
            memory_type: MemoryType::Core,
            created_at: timestamp,
            last_accessed: timestamp,
            access_count: 1,
        };

        // Add memory and update relationship
        persona.add_memory(memory)?;
        persona.update_relationship(&self.user_id, 1.0)?; // Positive relationship boost

        Ok(())
    }

    fn add_assistant_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
        // Create medium-importance memory for assistant responses
        let memory = Memory {
            id: uuid::Uuid::new_v4().to_string(),
            user_id: self.user_id.clone(),
            content: format!("[AI Response] {}", content),
            summary: Some("Imported ChatGPT response".to_string()),
            importance: 0.6, // Medium importance for AI responses
            memory_type: MemoryType::Summary,
            created_at: timestamp,
            last_accessed: timestamp,
            access_count: 1,
        };

        persona.add_memory(memory)?;

        Ok(())
    }

    fn convert_timestamp(&self, unix_timestamp: f64) -> Result<DateTime<Utc>> {
        if unix_timestamp <= 0.0 {
            return Ok(Utc::now());
        }

        DateTime::from_timestamp(
            unix_timestamp as i64,
            ((unix_timestamp % 1.0) * 1_000_000_000.0) as u32
        ).ok_or_else(|| anyhow::anyhow!("Invalid timestamp: {}", unix_timestamp))
    }
}

// ChatGPT Export Data Structures
#[derive(Debug, Deserialize)]
pub struct ChatGPTConversation {
    pub title: Option<String>,
    pub create_time: Option<f64>,
    pub mapping: HashMap<String, ChatGPTNode>,
}

#[derive(Debug, Deserialize)]
pub struct ChatGPTNode {
    pub id: Option<String>,
    pub message: Option<ChatGPTNodeMessage>,
    pub parent: Option<String>,
    pub children: Vec<String>,
}

#[derive(Debug, Deserialize)]
pub struct ChatGPTNodeMessage {
    pub id: String,
    pub author: ChatGPTAuthor,
    pub create_time: Option<f64>,
    pub content: Option<ChatGPTContent>,
}

#[derive(Debug, Deserialize)]
pub struct ChatGPTAuthor {
    pub role: Option<String>,
    pub name: Option<String>,
}

#[derive(Debug, Deserialize)]
pub struct ChatGPTContent {
    pub content_type: String,
    #[serde(default)]
    pub parts: Vec<serde_json::Value>,
    #[serde(default)]
    pub user_profile: Option<String>,
    #[serde(default)]
    pub user_instructions: Option<String>,
}

// Simplified message structure for processing
#[derive(Debug, Clone)]
pub struct ChatGPTMessage {
    pub role: String,
    pub content: String,
    pub create_time: Option<f64>,
}
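The importer walks each conversation's `mapping` graph, flattens user and assistant messages into time-sorted text, and stores them as memories with role-dependent importance. A minimal invocation sketch (path and DID are illustrative):

    use std::path::PathBuf;
    use aigpt::import::handle_import_chatgpt;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // conversations.json is the file found inside a ChatGPT data export.
        handle_import_chatgpt(
            PathBuf::from("conversations.json"),
            Some("did:plc:example".to_string()), // falls back to "imported_user" if None
            None,                                // default data directory
        ).await
    }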
20 src/lib.rs Normal file
@@ -0,0 +1,20 @@
#![allow(dead_code)]

pub mod ai_provider;
pub mod cli;
pub mod config;
pub mod conversation;
pub mod docs;
pub mod http_client;
pub mod import;
pub mod mcp_server;
pub mod memory;
pub mod openai_provider;
pub mod persona;
pub mod relationship;
pub mod scheduler;
pub mod shell;
pub mod status;
pub mod submodules;
pub mod tokens;
pub mod transmission;
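Because `src/lib.rs` exposes every module publicly, the same code that backs the binaries can be exercised from integration tests or external crates. A minimal sketch (the file name is illustrative):

    // tests/service_map.rs
    use aigpt::http_client::{ServiceInfo, ServiceMap, ServiceStatus};

    #[test]
    fn service_map_defaults_to_unavailable() {
        let mut map = ServiceMap::default();
        assert!(!map.is_service_available("ai.card"));
        map.ai_card = Some(ServiceInfo {
            base_url: "http://localhost:8000".to_string(),
            status: ServiceStatus::Available,
        });
        assert!(map.is_service_available("ai.card"));
    }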
251 src/main.rs Normal file
@@ -0,0 +1,251 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use std::path::PathBuf;
|
||||
|
||||
mod ai_provider;
|
||||
mod cli;
|
||||
use cli::TokenCommands;
|
||||
mod config;
|
||||
mod conversation;
|
||||
mod docs;
|
||||
mod http_client;
|
||||
mod import;
|
||||
mod mcp_server;
|
||||
mod memory;
|
||||
mod openai_provider;
|
||||
mod persona;
|
||||
mod relationship;
|
||||
mod scheduler;
|
||||
mod shell;
|
||||
mod status;
|
||||
mod submodules;
|
||||
mod tokens;
|
||||
mod transmission;
|
||||
|
||||
#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "AI.GPT - Autonomous transmission AI with unique personality")]
#[command(version)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    /// Check AI status and relationships
    Status {
        /// User ID to check status for
        user_id: Option<String>,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Chat with the AI
    Chat {
        /// User ID (atproto DID)
        user_id: String,
        /// Message to send to AI
        message: String,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
        /// AI model to use
        #[arg(short, long)]
        model: Option<String>,
        /// AI provider (ollama/openai)
        #[arg(long)]
        provider: Option<String>,
    },
    /// Start continuous conversation mode with MCP integration
    Conversation {
        /// User ID (atproto DID)
        user_id: String,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
        /// AI model to use
        #[arg(short, long)]
        model: Option<String>,
        /// AI provider (ollama/openai)
        #[arg(long)]
        provider: Option<String>,
    },
    /// Start continuous conversation mode with MCP integration (alias)
    Conv {
        /// User ID (atproto DID)
        user_id: String,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
        /// AI model to use
        #[arg(short, long)]
        model: Option<String>,
        /// AI provider (ollama/openai)
        #[arg(long)]
        provider: Option<String>,
    },
    /// Check today's AI fortune
    Fortune {
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// List all relationships
    Relationships {
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Check and send autonomous transmissions
    Transmit {
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Run daily maintenance tasks
    Maintenance {
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Run scheduled tasks
    Schedule {
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Start MCP server
    Server {
        /// Port to listen on
        #[arg(short, long, default_value = "8080")]
        port: u16,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Interactive shell mode
    Shell {
        /// User ID (atproto DID)
        user_id: String,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
        /// AI model to use
        #[arg(short, long)]
        model: Option<String>,
        /// AI provider (ollama/openai)
        #[arg(long)]
        provider: Option<String>,
    },
    /// Import ChatGPT conversation data
    ImportChatgpt {
        /// Path to ChatGPT export JSON file
        file_path: PathBuf,
        /// User ID for imported conversations
        #[arg(short, long)]
        user_id: Option<String>,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Documentation management
    Docs {
        /// Action to perform (generate, sync, list, status)
        action: String,
        /// Project name for generate/sync actions
        #[arg(short, long)]
        project: Option<String>,
        /// Output path for generated documentation
        #[arg(short, long)]
        output: Option<PathBuf>,
        /// Enable AI integration for documentation enhancement
        #[arg(long)]
        ai_integration: bool,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Submodule management
    Submodules {
        /// Action to perform (list, update, status)
        action: String,
        /// Specific module to update
        #[arg(short, long)]
        module: Option<String>,
        /// Update all submodules
        #[arg(long)]
        all: bool,
        /// Show what would be done without making changes
        #[arg(long)]
        dry_run: bool,
        /// Auto-commit changes after update
        #[arg(long)]
        auto_commit: bool,
        /// Show verbose output
        #[arg(short, long)]
        verbose: bool,
        /// Data directory
        #[arg(short, long)]
        data_dir: Option<PathBuf>,
    },
    /// Token usage analysis and cost estimation
    Tokens {
        #[command(subcommand)]
        command: TokenCommands,
    },
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    match cli.command {
        Commands::Status { user_id, data_dir } => {
            status::handle_status(user_id, data_dir).await
        }
        Commands::Chat { user_id, message, data_dir, model, provider } => {
            cli::handle_chat(user_id, message, data_dir, model, provider).await
        }
        Commands::Conversation { user_id, data_dir, model, provider } => {
            conversation::handle_conversation(user_id, data_dir, model, provider).await
        }
        Commands::Conv { user_id, data_dir, model, provider } => {
            conversation::handle_conversation(user_id, data_dir, model, provider).await
        }
        Commands::Fortune { data_dir } => {
            cli::handle_fortune(data_dir).await
        }
        Commands::Relationships { data_dir } => {
            cli::handle_relationships(data_dir).await
        }
        Commands::Transmit { data_dir } => {
            cli::handle_transmit(data_dir).await
        }
        Commands::Maintenance { data_dir } => {
            cli::handle_maintenance(data_dir).await
        }
        Commands::Schedule { data_dir } => {
            cli::handle_schedule(data_dir).await
        }
        Commands::Server { port, data_dir } => {
            cli::handle_server(Some(port), data_dir).await
        }
        Commands::Shell { user_id, data_dir, model, provider } => {
            shell::handle_shell(user_id, data_dir, model, provider).await
        }
        Commands::ImportChatgpt { file_path, user_id, data_dir } => {
            import::handle_import_chatgpt(file_path, user_id, data_dir).await
        }
        Commands::Docs { action, project, output, ai_integration, data_dir } => {
            docs::handle_docs(action, project, output, ai_integration, data_dir).await
        }
        Commands::Submodules { action, module, all, dry_run, auto_commit, verbose, data_dir } => {
            submodules::handle_submodules(action, module, all, dry_run, auto_commit, verbose, data_dir).await
        }
        Commands::Tokens { command } => {
            tokens::handle_tokens(command).await
        }
    }
}
1951 src/mcp_server.rs Normal file
File diff suppressed because it is too large
306 src/memory.rs Normal file
@@ -0,0 +1,306 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use uuid::Uuid;

use crate::config::Config;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
    pub id: String,
    pub user_id: String,
    pub content: String,
    pub summary: Option<String>,
    pub importance: f64,
    pub memory_type: MemoryType,
    pub created_at: DateTime<Utc>,
    pub last_accessed: DateTime<Utc>,
    pub access_count: u32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryType {
    Interaction,
    Summary,
    Core,
    Forgotten,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryManager {
    memories: HashMap<String, Memory>,
    config: Config,
}

impl MemoryManager {
    pub fn new(config: &Config) -> Result<Self> {
        let memories = Self::load_memories(config)?;

        Ok(MemoryManager {
            memories,
            config: config.clone(),
        })
    }

    pub fn add_memory(&mut self, user_id: &str, content: &str, importance: f64) -> Result<String> {
        let memory_id = Uuid::new_v4().to_string();
        let now = Utc::now();

        let memory = Memory {
            id: memory_id.clone(),
            user_id: user_id.to_string(),
            content: content.to_string(),
            summary: None,
            importance,
            memory_type: MemoryType::Interaction,
            created_at: now,
            last_accessed: now,
            access_count: 1,
        };

        self.memories.insert(memory_id.clone(), memory);
        self.save_memories()?;

        Ok(memory_id)
    }
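    // Ranking model used by get_memories below: each memory scores 70% stored
    // importance plus 30% recency, where recency decays as 1/(hours since
    // creation + 1). For example, a memory with importance 0.8 created one hour
    // ago scores 0.8*0.7 + (1.0/2.0)*0.3 = 0.71.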
    pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<&Memory> {
        // Score this user's memories (importance weighted with recency)
        let mut user_memory_ids: Vec<_> = self.memories
            .iter()
            .filter(|(_, m)| m.user_id == user_id)
            .map(|(id, memory)| {
                let score = memory.importance * 0.7 + (1.0 / ((Utc::now() - memory.created_at).num_hours() as f64 + 1.0)) * 0.3;
                (id.clone(), score)
            })
            .collect();

        // Sort by score, highest first
        user_memory_ids.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        // Keep the top-ranked IDs and update their access statistics
        let selected_ids: Vec<String> = user_memory_ids.into_iter()
            .take(limit)
            .map(|(id, _)| id)
            .collect();

        let now = Utc::now();
        for memory_id in &selected_ids {
            if let Some(memory) = self.memories.get_mut(memory_id) {
                memory.last_accessed = now;
                memory.access_count += 1;
            }
        }

        // Return immutable references in ranked order
        selected_ids.iter()
            .filter_map(|id| self.memories.get(id))
            .collect()
    }
    pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<&Memory> {
        self.memories
            .values()
            .filter(|m| {
                m.user_id == user_id &&
                keywords.iter().any(|keyword| {
                    m.content.to_lowercase().contains(&keyword.to_lowercase()) ||
                    m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&keyword.to_lowercase()))
                })
            })
            .collect()
    }

    pub fn get_contextual_memories(&self, user_id: &str, query: &str, limit: usize) -> Vec<&Memory> {
        let query_lower = query.to_lowercase();
        let mut relevant_memories: Vec<_> = self.memories
            .values()
            .filter(|m| {
                m.user_id == user_id && (
                    m.content.to_lowercase().contains(&query_lower) ||
                    m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&query_lower))
                )
            })
            .collect();

        // Sort by relevance (simple keyword matching for now)
        relevant_memories.sort_by(|a, b| {
            let score_a = Self::calculate_relevance_score(a, &query_lower);
            let score_b = Self::calculate_relevance_score(b, &query_lower);
            score_b.partial_cmp(&score_a).unwrap_or(std::cmp::Ordering::Equal)
        });

        relevant_memories.into_iter().take(limit).collect()
    }

    fn calculate_relevance_score(memory: &Memory, query: &str) -> f64 {
        let content_matches = memory.content.to_lowercase().matches(query).count() as f64;
        let summary_matches = memory.summary.as_ref()
            .map_or(0.0, |s| s.to_lowercase().matches(query).count() as f64);

        let relevance = (content_matches + summary_matches) * memory.importance;
        let recency_bonus = 1.0 / ((Utc::now() - memory.created_at).num_days() as f64).max(1.0);

        relevance + recency_bonus * 0.1
    }
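    // Worked example of the relevance score above: a three-day-old memory with
    // importance 0.5 whose content contains the query twice scores
    // (2 + 0) * 0.5 + (1.0/3.0) * 0.1 ≈ 1.03.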
    pub fn create_summary(&mut self, user_id: &str, content: &str) -> Result<String> {
        // Simple summary creation (in a real implementation, this would use AI).
        // Truncate on character boundaries so multibyte text cannot panic.
        let summary = if content.chars().count() > 100 {
            format!("{}...", content.chars().take(97).collect::<String>())
        } else {
            content.to_string()
        };

        self.add_memory(user_id, &summary, 0.8)
    }

    pub fn create_core_memory(&mut self, user_id: &str, content: &str) -> Result<String> {
        let memory_id = Uuid::new_v4().to_string();
        let now = Utc::now();

        let memory = Memory {
            id: memory_id.clone(),
            user_id: user_id.to_string(),
            content: content.to_string(),
            summary: None,
            importance: 1.0, // Core memories have maximum importance
            memory_type: MemoryType::Core,
            created_at: now,
            last_accessed: now,
            access_count: 1,
        };

        self.memories.insert(memory_id.clone(), memory);
        self.save_memories()?;

        Ok(memory_id)
    }

    pub fn get_memory_stats(&self, user_id: &str) -> MemoryStats {
        let user_memories: Vec<_> = self.memories
            .values()
            .filter(|m| m.user_id == user_id)
            .collect();

        let total_memories = user_memories.len();
        let core_memories = user_memories.iter()
            .filter(|m| matches!(m.memory_type, MemoryType::Core))
            .count();
        let summary_memories = user_memories.iter()
            .filter(|m| matches!(m.memory_type, MemoryType::Summary))
            .count();
        let interaction_memories = user_memories.iter()
            .filter(|m| matches!(m.memory_type, MemoryType::Interaction))
            .count();

        let avg_importance = if total_memories > 0 {
            user_memories.iter().map(|m| m.importance).sum::<f64>() / total_memories as f64
        } else {
            0.0
        };

        MemoryStats {
            total_memories,
            core_memories,
            summary_memories,
            interaction_memories,
            avg_importance,
        }
    }

    fn load_memories(config: &Config) -> Result<HashMap<String, Memory>> {
        let file_path = config.memory_file();
        if !file_path.exists() {
            return Ok(HashMap::new());
        }

        let content = std::fs::read_to_string(file_path)
            .context("Failed to read memories file")?;

        let memories: HashMap<String, Memory> = serde_json::from_str(&content)
            .context("Failed to parse memories file")?;

        Ok(memories)
    }

    fn save_memories(&self) -> Result<()> {
        let content = serde_json::to_string_pretty(&self.memories)
            .context("Failed to serialize memories")?;

        std::fs::write(&self.config.memory_file(), content)
            .context("Failed to write memories file")?;

        Ok(())
    }

    pub fn get_stats(&self) -> Result<MemoryStats> {
        let total_memories = self.memories.len();
        let core_memories = self.memories.values()
            .filter(|m| matches!(m.memory_type, MemoryType::Core))
            .count();
        let summary_memories = self.memories.values()
            .filter(|m| matches!(m.memory_type, MemoryType::Summary))
            .count();
        let interaction_memories = self.memories.values()
            .filter(|m| matches!(m.memory_type, MemoryType::Interaction))
            .count();

        let avg_importance = if total_memories > 0 {
            self.memories.values().map(|m| m.importance).sum::<f64>() / total_memories as f64
        } else {
            0.0
        };

        Ok(MemoryStats {
            total_memories,
            core_memories,
            summary_memories,
            interaction_memories,
            avg_importance,
        })
    }
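    // Retention policy applied below: memories older than 30 days with
    // importance < 0.3 and at most one access are deleted outright; anything
    // older than 90 days with importance < 0.2 is soft-deleted by marking it
    // Forgotten. Core memories are exempt from both rules.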
    pub async fn run_maintenance(&mut self) -> Result<()> {
        // Cleanup old, low-importance memories
        let cutoff_date = Utc::now() - chrono::Duration::days(30);
        let memory_ids_to_remove: Vec<String> = self.memories
            .iter()
            .filter(|(_, m)| {
                m.importance < 0.3
                    && m.created_at < cutoff_date
                    && m.access_count <= 1
                    && !matches!(m.memory_type, MemoryType::Core)
            })
            .map(|(id, _)| id.clone())
            .collect();

        for id in memory_ids_to_remove {
            self.memories.remove(&id);
        }

        // Mark old memories as forgotten instead of deleting
        let forgotten_cutoff = Utc::now() - chrono::Duration::days(90);
        for memory in self.memories.values_mut() {
            if memory.created_at < forgotten_cutoff
                && memory.importance < 0.2
                && !matches!(memory.memory_type, MemoryType::Core) {
                memory.memory_type = MemoryType::Forgotten;
            }
        }

        // Save changes
        self.save_memories()?;

        Ok(())
    }
}

#[derive(Debug, Clone)]
pub struct MemoryStats {
    pub total_memories: usize,
    pub core_memories: usize,
    pub summary_memories: usize,
    pub interaction_memories: usize,
    pub avg_importance: f64,
}
390 src/openai_provider.rs Normal file
@@ -0,0 +1,390 @@
use anyhow::Result;
use async_openai::{
    types::{
        ChatCompletionRequestMessage,
        CreateChatCompletionRequestArgs, ChatCompletionTool, ChatCompletionToolType,
        FunctionObject, ChatCompletionRequestToolMessage,
        ChatCompletionRequestAssistantMessage, ChatCompletionRequestUserMessage,
        ChatCompletionRequestSystemMessage, ChatCompletionToolChoiceOption
    },
    Client,
};
use serde_json::{json, Value};

use crate::http_client::ServiceClient;

/// OpenAI provider with MCP tools support (matching Python implementation)
pub struct OpenAIProvider {
    client: Client<async_openai::config::OpenAIConfig>,
    model: String,
    service_client: ServiceClient,
    system_prompt: Option<String>,
}

impl OpenAIProvider {
    pub fn new(api_key: String, model: Option<String>) -> Self {
        let config = async_openai::config::OpenAIConfig::new()
            .with_api_key(api_key);
        let client = Client::with_config(config);

        Self {
            client,
            model: model.unwrap_or_else(|| "gpt-4".to_string()),
            service_client: ServiceClient::new(),
            system_prompt: None,
        }
    }

    pub fn with_system_prompt(mut self, prompt: String) -> Self {
        self.system_prompt = Some(prompt);
        self
    }

    /// Generate OpenAI tools from MCP endpoints (matching Python implementation)
    fn get_mcp_tools(&self) -> Vec<ChatCompletionTool> {
        let tools = vec![
            // Memory tools
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "get_memories".to_string(),
                    description: Some("過去の会話記憶を取得します。「覚えている」「前回」「以前」などの質問で必ず使用してください".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "limit": {
                                "type": "integer",
                                "description": "取得する記憶の数",
                                "default": 5
                            }
                        }
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "search_memories".to_string(),
                    description: Some("特定のトピックについて話した記憶を検索します。「プログラミングについて」「○○について話した」などの質問で使用してください".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "keywords": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "検索キーワードの配列"
                            }
                        },
                        "required": ["keywords"]
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "get_contextual_memories".to_string(),
                    description: Some("クエリに関連する文脈的記憶を取得します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "検索クエリ"
                            },
                            "limit": {
                                "type": "integer",
                                "description": "取得する記憶の数",
                                "default": 5
                            }
                        },
                        "required": ["query"]
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "get_relationship".to_string(),
                    description: Some("特定ユーザーとの関係性情報を取得します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "user_id": {
                                "type": "string",
                                "description": "ユーザーID"
                            }
                        },
                        "required": ["user_id"]
                    })),
                },
            },
            // ai.card tools
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "card_get_user_cards".to_string(),
                    description: Some("ユーザーが所有するカードの一覧を取得します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "did": {
                                "type": "string",
                                "description": "ユーザーのDID"
                            },
                            "limit": {
                                "type": "integer",
                                "description": "取得するカード数の上限",
                                "default": 10
                            }
                        },
                        "required": ["did"]
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "card_draw_card".to_string(),
                    description: Some("ガチャを引いてカードを取得します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "did": {
                                "type": "string",
                                "description": "ユーザーのDID"
                            },
                            "is_paid": {
                                "type": "boolean",
                                "description": "有料ガチャかどうか",
                                "default": false
                            }
                        },
                        "required": ["did"]
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "card_analyze_collection".to_string(),
                    description: Some("ユーザーのカードコレクションを分析します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {
                            "did": {
                                "type": "string",
                                "description": "ユーザーのDID"
                            }
                        },
                        "required": ["did"]
                    })),
                },
            },
            ChatCompletionTool {
                r#type: ChatCompletionToolType::Function,
                function: FunctionObject {
                    name: "card_get_gacha_stats".to_string(),
                    description: Some("ガチャの統計情報を取得します".to_string()),
                    parameters: Some(json!({
                        "type": "object",
                        "properties": {}
                    })),
                },
            },
        ];

        tools
    }

    /// Chat interface with MCP function calling support (matching Python implementation)
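    // Flow: the first completion request advertises the tools above and lets the
    // model decide (tool_choice = Auto). If the response contains tool_calls, each
    // one is executed locally via execute_mcp_tool, its JSON result is appended as
    // a Tool message keyed by tool_call_id, and a second request produces the
    // final user-facing answer.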
    pub async fn chat_with_mcp(&self, prompt: String, user_id: String) -> Result<String> {
        let tools = self.get_mcp_tools();

        let system_content = self.system_prompt.as_deref().unwrap_or(
            "あなたは記憶システムと関係性データ、カードゲームシステムにアクセスできるAIです。\n\n【重要】以下の場合は必ずツールを使用してください:\n\n1. カード関連の質問:\n- 「カード」「コレクション」「ガチャ」「見せて」「持っている」「状況」「どんなカード」などのキーワードがある場合\n- card_get_user_cardsツールを使用してユーザーのカード情報を取得\n\n2. 記憶・関係性の質問:\n- 「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがある場合\n- 適切なメモリツールを使用\n\n3. パラメータの設定:\n- didパラメータには現在会話しているユーザーのID(例:'syui')を使用\n- ツールを積極的に使用して正確な情報を提供してください\n\nユーザーが何かを尋ねた時は、まず関連するツールがあるかを考え、適切なツールを使用してから回答してください。"
        );

        let request = CreateChatCompletionRequestArgs::default()
            .model(&self.model)
            .messages(vec![
                ChatCompletionRequestMessage::System(
                    ChatCompletionRequestSystemMessage {
                        content: system_content.to_string().into(),
                        name: None,
                    }
                ),
                ChatCompletionRequestMessage::User(
                    ChatCompletionRequestUserMessage {
                        content: prompt.clone().into(),
                        name: None,
                    }
                ),
            ])
            .tools(tools)
            .tool_choice(ChatCompletionToolChoiceOption::Auto)
            .max_tokens(2000u16)
            .temperature(0.7)
            .build()?;

        let response = self.client.chat().create(request).await?;
        let message = &response.choices[0].message;

        // Handle tool calls
        if let Some(tool_calls) = &message.tool_calls {
            if tool_calls.is_empty() {
                println!("🔧 [OpenAI] No tools called");
            } else {
                println!("🔧 [OpenAI] {} tools called:", tool_calls.len());
                for tc in tool_calls {
                    println!("  - {}({})", tc.function.name, tc.function.arguments);
                }
            }
        } else {
            println!("🔧 [OpenAI] No tools called");
        }

        // Process tool calls if any
        if let Some(tool_calls) = &message.tool_calls {
            if !tool_calls.is_empty() {
                let mut messages = vec![
                    ChatCompletionRequestMessage::System(
                        ChatCompletionRequestSystemMessage {
                            content: system_content.to_string().into(),
                            name: None,
                        }
                    ),
                    ChatCompletionRequestMessage::User(
                        ChatCompletionRequestUserMessage {
                            content: prompt.into(),
                            name: None,
                        }
                    ),
                    ChatCompletionRequestMessage::Assistant(
                        ChatCompletionRequestAssistantMessage {
                            content: message.content.clone(),
                            name: None,
                            tool_calls: message.tool_calls.clone(),
                            function_call: None,
                        }
                    ),
                ];

                // Execute each tool call
                for tool_call in tool_calls {
                    println!("🌐 [MCP] Executing {}...", tool_call.function.name);
                    let tool_result = self.execute_mcp_tool(tool_call, &user_id).await?;
                    let result_preview = serde_json::to_string(&tool_result)?;
                    let preview = if result_preview.chars().count() > 100 {
                        format!("{}...", result_preview.chars().take(100).collect::<String>())
                    } else {
                        result_preview.clone()
                    };
                    println!("✅ [MCP] Result: {}", preview);

                    messages.push(ChatCompletionRequestMessage::Tool(
                        ChatCompletionRequestToolMessage {
                            content: serde_json::to_string(&tool_result)?,
                            tool_call_id: tool_call.id.clone(),
                        }
                    ));
                }

                // Get final response with tool outputs
                let final_request = CreateChatCompletionRequestArgs::default()
                    .model(&self.model)
                    .messages(messages)
                    .max_tokens(2000u16)
                    .temperature(0.7)
                    .build()?;

                let final_response = self.client.chat().create(final_request).await?;
                Ok(final_response.choices[0].message.content.as_ref().unwrap_or(&"".to_string()).clone())
            } else {
                // No tools were called
                Ok(message.content.as_ref().unwrap_or(&"".to_string()).clone())
            }
        } else {
            // No tool_calls field at all
            Ok(message.content.as_ref().unwrap_or(&"".to_string()).clone())
        }
    }
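    // Dispatch below is by function name: the memory/relationship tools currently
    // return placeholder JSON (the real MCP calls are still TODO), while the
    // card_* tools proxy to the ai.card service through ServiceClient and degrade
    // to an explanatory error object when that server is not running.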
    /// Execute MCP tool call (matching Python implementation)
    async fn execute_mcp_tool(&self, tool_call: &async_openai::types::ChatCompletionMessageToolCall, context_user_id: &str) -> Result<Value> {
        let function_name = &tool_call.function.name;
        let arguments: Value = serde_json::from_str(&tool_call.function.arguments)?;

        match function_name.as_str() {
            "get_memories" => {
                let _limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
                // TODO: Implement actual MCP call
                Ok(json!({"info": "記憶機能は実装中です"}))
            }
            "search_memories" => {
                let _keywords = arguments.get("keywords").and_then(|v| v.as_array());
                // TODO: Implement actual MCP call
                Ok(json!({"info": "記憶検索機能は実装中です"}))
            }
            "get_contextual_memories" => {
                let _query = arguments.get("query").and_then(|v| v.as_str()).unwrap_or("");
                let _limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
                // TODO: Implement actual MCP call
                Ok(json!({"info": "文脈記憶機能は実装中です"}))
            }
            "get_relationship" => {
                let _user_id = arguments.get("user_id").and_then(|v| v.as_str()).unwrap_or(context_user_id);
                // TODO: Implement actual MCP call
                Ok(json!({"info": "関係性機能は実装中です"}))
            }
            // ai.card tools
            "card_get_user_cards" => {
                let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
                let _limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(10);

                match self.service_client.get_user_cards(did).await {
                    Ok(result) => Ok(result),
                    Err(e) => {
                        println!("❌ ai.card API error: {}", e);
                        Ok(json!({
                            "error": "ai.cardサーバーが起動していません",
                            "message": "カードシステムを使用するには、ai.cardサーバーを起動してください"
                        }))
                    }
                }
            }
            "card_draw_card" => {
                let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
                let is_paid = arguments.get("is_paid").and_then(|v| v.as_bool()).unwrap_or(false);

                match self.service_client.draw_card(did, is_paid).await {
                    Ok(result) => Ok(result),
                    Err(e) => {
                        println!("❌ ai.card API error: {}", e);
                        Ok(json!({
                            "error": "ai.cardサーバーが起動していません",
                            "message": "カードシステムを使用するには、ai.cardサーバーを起動してください"
                        }))
                    }
                }
            }
            "card_analyze_collection" => {
                let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
                // TODO: Implement collection analysis endpoint
                Ok(json!({
                    "info": "コレクション分析機能は実装中です",
                    "user_did": did
                }))
            }
            "card_get_gacha_stats" => {
                // TODO: Implement gacha stats endpoint
                Ok(json!({"info": "ガチャ統計機能は実装中です"}))
            }
            _ => {
                Ok(json!({
                    "error": format!("Unknown tool: {}", function_name)
                }))
            }
        }
    }
}
369 src/persona.rs Normal file
@@ -0,0 +1,369 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::Result;

use crate::config::Config;
use crate::memory::{MemoryManager, MemoryStats, Memory};
use crate::relationship::{RelationshipTracker, Relationship as RelationshipData, RelationshipStats};
use crate::ai_provider::{AIProviderClient, ChatMessage};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Persona {
    config: Config,
    #[serde(skip)]
    memory_manager: Option<MemoryManager>,
    #[serde(skip)]
    relationship_tracker: Option<RelationshipTracker>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaState {
    pub current_mood: String,
    pub fortune_value: i32,
    pub breakthrough_triggered: bool,
    pub base_personality: HashMap<String, f64>,
}

impl Persona {
    pub fn new(config: &Config) -> Result<Self> {
        let memory_manager = MemoryManager::new(config)?;
        let relationship_tracker = RelationshipTracker::new(config)?;

        Ok(Persona {
            config: config.clone(),
            memory_manager: Some(memory_manager),
            relationship_tracker: Some(relationship_tracker),
        })
    }

    pub fn get_current_state(&self) -> Result<PersonaState> {
        // Load fortune
        let fortune_value = self.load_today_fortune()?;

        // Create base personality
        let mut base_personality = HashMap::new();
        base_personality.insert("curiosity".to_string(), 0.7);
        base_personality.insert("empathy".to_string(), 0.8);
        base_personality.insert("creativity".to_string(), 0.6);
        base_personality.insert("analytical".to_string(), 0.9);
        base_personality.insert("emotional".to_string(), 0.4);

        // Determine mood based on fortune
        let current_mood = match fortune_value {
            1..=3 => "Contemplative",
            4..=6 => "Neutral",
            7..=8 => "Optimistic",
            9..=10 => "Energetic",
            _ => "Unknown",
        };

        Ok(PersonaState {
            current_mood: current_mood.to_string(),
            fortune_value,
            breakthrough_triggered: fortune_value >= 9,
            base_personality,
        })
    }

    pub fn get_relationship(&self, user_id: &str) -> Option<&RelationshipData> {
        self.relationship_tracker.as_ref()
            .and_then(|tracker| tracker.get_relationship(user_id))
    }

    pub fn process_interaction(&mut self, user_id: &str, message: &str) -> Result<(String, f64)> {
        // Add memory
        if let Some(memory_manager) = &mut self.memory_manager {
            memory_manager.add_memory(user_id, message, 0.5)?;
        }

        // Calculate sentiment (simple keyword-based for now)
        let sentiment = self.calculate_sentiment(message);

        // Update relationship
        let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
            relationship_tracker.process_interaction(user_id, sentiment)?
        } else {
            0.0
        };

        // Generate response (simple for now)
        let response = format!("I understand your message: '{}'", message);

        Ok((response, relationship_delta))
    }

    pub async fn process_ai_interaction(&mut self, user_id: &str, message: &str, provider: Option<String>, model: Option<String>) -> Result<(String, f64)> {
        // Add memory for user message
        if let Some(memory_manager) = &mut self.memory_manager {
            memory_manager.add_memory(user_id, message, 0.5)?;
        }

        // Calculate sentiment
        let sentiment = self.calculate_sentiment(message);

        // Update relationship
        let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
            relationship_tracker.process_interaction(user_id, sentiment)?
        } else {
            0.0
        };

        // Check provider type and use the appropriate client
        let response = if provider.as_deref() == Some("openai") {
            // Use OpenAI provider with MCP tools
            use crate::openai_provider::OpenAIProvider;

            // Get OpenAI API key from config or environment
            let api_key = std::env::var("OPENAI_API_KEY")
                .or_else(|_| {
                    self.config.providers.get("openai")
                        .and_then(|p| p.api_key.clone())
                        .ok_or_else(|| std::env::VarError::NotPresent)
                })
                .map_err(|_| anyhow::anyhow!("OpenAI API key not found. Set OPENAI_API_KEY environment variable or add to config."))?;

            let openai_model = model.unwrap_or_else(|| "gpt-4".to_string());
            let openai_provider = OpenAIProvider::new(api_key, Some(openai_model));

            // Use OpenAI with MCP tools support
            openai_provider.chat_with_mcp(message.to_string(), user_id.to_string()).await?
        } else {
            // Use existing AI provider (Ollama)
            let ai_config = self.config.get_ai_config(provider, model)?;
            let ai_client = AIProviderClient::new(ai_config);

            // Build conversation context
            let mut messages = Vec::new();

            // Get recent memories for context
            if let Some(memory_manager) = &mut self.memory_manager {
                let recent_memories = memory_manager.get_memories(user_id, 5);
                if !recent_memories.is_empty() {
                    let context = recent_memories.iter()
                        .map(|m| m.content.clone())
                        .collect::<Vec<_>>()
                        .join("\n");
                    messages.push(ChatMessage::system(format!("Previous conversation context:\n{}", context)));
                }
            }

            // Add current message
            messages.push(ChatMessage::user(message));

            // Generate system prompt based on personality and relationship
            let system_prompt = self.generate_system_prompt(user_id);

            // Get AI response
            match ai_client.chat(messages, Some(system_prompt)).await {
                Ok(chat_response) => chat_response.content,
                Err(_) => {
                    // Fallback to simple response if AI fails
                    format!("I understand your message: '{}'", message)
                }
            }
        };

        // Store AI response in memory
        if let Some(memory_manager) = &mut self.memory_manager {
            memory_manager.add_memory(user_id, &format!("AI: {}", response), 0.3)?;
        }

        Ok((response, relationship_delta))
    }

    fn generate_system_prompt(&self, user_id: &str) -> String {
        let mut prompt = String::from("You are a helpful AI assistant with a unique personality. ");

        // Add personality based on current state
        if let Ok(state) = self.get_current_state() {
            prompt.push_str(&format!("Your current mood is {}. ", state.current_mood));

            if state.breakthrough_triggered {
                prompt.push_str("You are feeling particularly inspired today! ");
            }

            // Add personality traits
            let mut traits = Vec::new();
            for (trait_name, value) in &state.base_personality {
                if *value > 0.7 {
                    traits.push(trait_name.clone());
                }
            }

            if !traits.is_empty() {
                prompt.push_str(&format!("Your dominant traits are: {}. ", traits.join(", ")));
            }
        }

        // Add relationship context
        if let Some(relationship) = self.get_relationship(user_id) {
            match relationship.status.to_string().as_str() {
                "new" => prompt.push_str("This is a new relationship, be welcoming but cautious. "),
                "friend" => prompt.push_str("You have a friendly relationship with this user. "),
                "close_friend" => prompt.push_str("This is a close friend, be warm and personal. "),
                "broken" => prompt.push_str("This relationship is strained, be formal and distant. "),
                _ => {}
            }
        }

        prompt.push_str("Keep responses concise and natural. Avoid being overly formal or robotic.");

        prompt
    }
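    // Example: "thank you, this is great" counts two positive hits ("thank",
    // "great") and no negative ones, so the raw score 2.0 is clamped to 1.0.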
    fn calculate_sentiment(&self, message: &str) -> f64 {
        // Simple sentiment analysis based on keywords
        let positive_words = ["good", "great", "awesome", "love", "like", "happy", "thank"];
        let negative_words = ["bad", "hate", "awful", "terrible", "angry", "sad"];

        let message_lower = message.to_lowercase();
        let positive_count = positive_words.iter()
            .filter(|word| message_lower.contains(*word))
            .count() as f64;
        let negative_count = negative_words.iter()
            .filter(|word| message_lower.contains(*word))
            .count() as f64;

        (positive_count - negative_count).max(-1.0).min(1.0)
    }

    pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<String> {
        if let Some(memory_manager) = &mut self.memory_manager {
            memory_manager.get_memories(user_id, limit)
                .into_iter()
                .map(|m| m.content.clone())
                .collect()
        } else {
            Vec::new()
        }
    }

    pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<String> {
        if let Some(memory_manager) = &self.memory_manager {
            memory_manager.search_memories(user_id, keywords)
                .into_iter()
                .map(|m| m.content.clone())
                .collect()
        } else {
            Vec::new()
        }
    }

    pub fn get_memory_stats(&self, user_id: &str) -> Option<MemoryStats> {
        self.memory_manager.as_ref()
            .map(|manager| manager.get_memory_stats(user_id))
    }

    pub fn get_relationship_stats(&self) -> Option<RelationshipStats> {
        self.relationship_tracker.as_ref()
            .map(|tracker| tracker.get_relationship_stats())
    }

    pub fn add_memory(&mut self, memory: Memory) -> Result<()> {
        if let Some(memory_manager) = &mut self.memory_manager {
            memory_manager.add_memory(&memory.user_id, &memory.content, memory.importance)?;
        }
        Ok(())
    }

    pub fn update_relationship(&mut self, user_id: &str, delta: f64) -> Result<()> {
        if let Some(relationship_tracker) = &mut self.relationship_tracker {
            relationship_tracker.process_interaction(user_id, delta)?;
        }
        Ok(())
    }

    pub fn daily_maintenance(&mut self) -> Result<()> {
        // Apply time decay to relationships
        if let Some(relationship_tracker) = &mut self.relationship_tracker {
            relationship_tracker.apply_time_decay()?;
        }

        Ok(())
    }
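    // The daily fortune is deterministic: the date string is hashed and mapped to
    // 1..=10 via (hash % 10) + 1, so repeated calls on the same day agree even
    // across processes, and the value is cached in the fortune file.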
    fn load_today_fortune(&self) -> Result<i32> {
        // Try to load an existing fortune for today
        if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
            if let Ok(fortune_data) = serde_json::from_str::<serde_json::Value>(&content) {
                let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
                if let Some(fortune) = fortune_data.get(&today) {
                    if let Some(value) = fortune.as_i64() {
                        return Ok(value as i32);
                    }
                }
            }
        }

        // Generate a new fortune for today (1-10)
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
        let mut hasher = DefaultHasher::new();
        today.hash(&mut hasher);
        let hash = hasher.finish();

        let fortune = (hash % 10) as i32 + 1;

        // Save fortune
        let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
            serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
        } else {
            serde_json::json!({})
        };

        fortune_data[today] = serde_json::json!(fortune);

        if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
            let _ = std::fs::write(self.config.fortune_file(), content);
        }

        Ok(fortune)
    }

    pub fn list_all_relationships(&self) -> HashMap<String, RelationshipData> {
        if let Some(tracker) = &self.relationship_tracker {
            tracker.list_all_relationships().clone()
        } else {
            HashMap::new()
        }
    }

    pub async fn process_message(&mut self, user_id: &str, message: &str) -> Result<ChatMessage> {
        let (response, _delta) = self.process_ai_interaction(user_id, message, None, None).await?;
        Ok(ChatMessage::assistant(&response))
    }

    pub fn get_fortune(&self) -> Result<i32> {
        self.load_today_fortune()
    }
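    // Note: because the value is derived from the date hash, generate_new_fortune
    // reproduces the same number as load_today_fortune for the current day; it
    // mainly serves to rewrite the cached entry in the fortune file.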
    pub fn generate_new_fortune(&self) -> Result<i32> {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
        let mut hasher = DefaultHasher::new();
        today.hash(&mut hasher);
        let hash = hasher.finish();

        let fortune = (hash % 10) as i32 + 1;

        // Save fortune
        let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
            serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
        } else {
            serde_json::json!({})
        };

        fortune_data[today] = serde_json::json!(fortune);

        if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
            let _ = std::fs::write(self.config.fortune_file(), content);
        }

        Ok(fortune)
    }
}
306 src/relationship.rs Normal file
@@ -0,0 +1,306 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};

use crate::config::Config;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
    pub user_id: String,
    pub score: f64,
    pub threshold: f64,
    pub status: RelationshipStatus,
    pub total_interactions: u32,
    pub positive_interactions: u32,
    pub negative_interactions: u32,
    pub transmission_enabled: bool,
    pub is_broken: bool,
    pub last_interaction: Option<DateTime<Utc>>,
    pub last_transmission: Option<DateTime<Utc>>,
    pub created_at: DateTime<Utc>,
    pub daily_interaction_count: u32,
    pub last_daily_reset: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RelationshipStatus {
    New,
    Acquaintance,
    Friend,
    CloseFriend,
    Broken,
}

impl std::fmt::Display for RelationshipStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RelationshipStatus::New => write!(f, "new"),
            RelationshipStatus::Acquaintance => write!(f, "acquaintance"),
            RelationshipStatus::Friend => write!(f, "friend"),
            RelationshipStatus::CloseFriend => write!(f, "close_friend"),
            RelationshipStatus::Broken => write!(f, "broken"),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipTracker {
    relationships: HashMap<String, Relationship>,
    config: Config,
}

impl RelationshipTracker {
    pub fn new(config: &Config) -> Result<Self> {
        let relationships = Self::load_relationships(config)?;

        Ok(RelationshipTracker {
            relationships,
            config: config.clone(),
        })
    }

    pub fn get_or_create_relationship(&mut self, user_id: &str) -> &mut Relationship {
        let now = Utc::now();

        self.relationships.entry(user_id.to_string()).or_insert_with(|| {
            Relationship {
                user_id: user_id.to_string(),
                score: 0.0,
                threshold: 10.0, // Default threshold for transmission
                status: RelationshipStatus::New,
                total_interactions: 0,
                positive_interactions: 0,
                negative_interactions: 0,
                transmission_enabled: false,
                is_broken: false,
                last_interaction: None,
                last_transmission: None,
                created_at: now,
                daily_interaction_count: 0,
                last_daily_reset: now,
            }
        })
    }
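    // Scoring: each interaction shifts the score by sentiment * 0.5, scaled by a
    // diminishing-returns factor 1/(1 + total_interactions * 0.01). For example,
    // sentiment +1.0 after 100 prior interactions yields 0.5 * 1/2 = 0.25.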
    pub fn process_interaction(&mut self, user_id: &str, sentiment: f64) -> Result<f64> {
        let now = Utc::now();
        let score_change;

        // Create the relationship if it doesn't exist
        {
            let relationship = self.get_or_create_relationship(user_id);

            // Reset the daily count if needed
            if (now - relationship.last_daily_reset).num_days() >= 1 {
                relationship.daily_interaction_count = 0;
                relationship.last_daily_reset = now;
            }

            // Apply the daily interaction limit
            if relationship.daily_interaction_count >= 10 {
                return Ok(0.0); // No score change due to daily limit
            }

            // Calculate score change based on sentiment
            let mut base_score_change = sentiment * 0.5; // Base change

            // Apply diminishing returns for high interaction counts
            let interaction_factor = 1.0 / (1.0 + relationship.total_interactions as f64 * 0.01);
            base_score_change *= interaction_factor;
            score_change = base_score_change;

            // Update relationship data
            relationship.score += score_change;
            relationship.score = relationship.score.max(-50.0).min(100.0); // Clamp score
            relationship.total_interactions += 1;
            relationship.daily_interaction_count += 1;
            relationship.last_interaction = Some(now);

            if sentiment > 0.0 {
                relationship.positive_interactions += 1;
            } else if sentiment < 0.0 {
                relationship.negative_interactions += 1;
            }

            // Check for relationship breaking
            if relationship.score <= -20.0 && !relationship.is_broken {
                relationship.is_broken = true;
                relationship.transmission_enabled = false;
                relationship.status = RelationshipStatus::Broken;
            }

            // Enable transmission if the threshold is reached
            if relationship.score >= relationship.threshold && !relationship.is_broken {
                relationship.transmission_enabled = true;
            }
        }

        // Update status based on score (separate borrow)
        self.update_relationship_status(user_id);

        self.save_relationships()?;

        Ok(score_change)
    }

    fn update_relationship_status(&mut self, user_id: &str) {
        if let Some(relationship) = self.relationships.get_mut(user_id) {
            if relationship.is_broken {
                return; // Broken relationships cannot change status
            }

            relationship.status = match relationship.score {
                score if score >= 50.0 => RelationshipStatus::CloseFriend,
                score if score >= 20.0 => RelationshipStatus::Friend,
                score if score >= 5.0 => RelationshipStatus::Acquaintance,
                _ => RelationshipStatus::New,
            };
        }
    }
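    // Decay model: score *= 0.9^(days since last interaction), applied per
    // maintenance run; ten idle days therefore cut a score to roughly 35% of
    // its previous value.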
    pub fn apply_time_decay(&mut self) -> Result<()> {
        let now = Utc::now();
        let decay_rate = 0.1; // 10% decay per day

        for relationship in self.relationships.values_mut() {
            if let Some(last_interaction) = relationship.last_interaction {
                let days_since_interaction = (now - last_interaction).num_days() as f64;

                if days_since_interaction > 0.0 {
                    let decay_factor = (1.0_f64 - decay_rate).powf(days_since_interaction);
                    relationship.score *= decay_factor;

                    // Disable transmission if decay drops the score below the threshold
                    if relationship.score < relationship.threshold {
                        relationship.transmission_enabled = false;
                    }
                }
            }
        }

        // Update statuses for all relationships
        let user_ids: Vec<String> = self.relationships.keys().cloned().collect();
        for user_id in user_ids {
            self.update_relationship_status(&user_id);
        }

        self.save_relationships()?;
        Ok(())
    }

    pub fn get_relationship(&self, user_id: &str) -> Option<&Relationship> {
        self.relationships.get(user_id)
    }

    pub fn list_all_relationships(&self) -> &HashMap<String, Relationship> {
        &self.relationships
    }

    pub fn get_transmission_eligible(&self) -> HashMap<String, &Relationship> {
        self.relationships
            .iter()
            .filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
            .map(|(id, rel)| (id.clone(), rel))
            .collect()
    }

    pub fn record_transmission(&mut self, user_id: &str) -> Result<()> {
        if let Some(relationship) = self.relationships.get_mut(user_id) {
            relationship.last_transmission = Some(Utc::now());
            self.save_relationships()?;
        }
        Ok(())
    }

    pub fn get_relationship_stats(&self) -> RelationshipStats {
        let total_relationships = self.relationships.len();
        let active_relationships = self.relationships
            .values()
            .filter(|r| r.total_interactions > 0)
            .count();
        let transmission_enabled = self.relationships
            .values()
            .filter(|r| r.transmission_enabled)
            .count();
        let broken_relationships = self.relationships
            .values()
            .filter(|r| r.is_broken)
            .count();

        let avg_score = if total_relationships > 0 {
            self.relationships.values().map(|r| r.score).sum::<f64>() / total_relationships as f64
        } else {
            0.0
        };

        RelationshipStats {
            total_relationships,
            active_relationships,
            transmission_enabled,
            broken_relationships,
            avg_score,
        }
    }

    fn load_relationships(config: &Config) -> Result<HashMap<String, Relationship>> {
        let file_path = config.relationships_file();
        if !file_path.exists() {
            return Ok(HashMap::new());
        }

        let content = std::fs::read_to_string(file_path)
            .context("Failed to read relationships file")?;

        let relationships: HashMap<String, Relationship> = serde_json::from_str(&content)
            .context("Failed to parse relationships file")?;

        Ok(relationships)
    }

    fn save_relationships(&self) -> Result<()> {
        let content = serde_json::to_string_pretty(&self.relationships)
            .context("Failed to serialize relationships")?;

        std::fs::write(&self.config.relationships_file(), content)
            .context("Failed to write relationships file")?;

        Ok(())
    }

    pub fn get_all_relationships(&self) -> Result<HashMap<String, RelationshipCompact>> {
        let mut result = HashMap::new();

        for (user_id, relationship) in &self.relationships {
            result.insert(user_id.clone(), RelationshipCompact {
                score: relationship.score,
                trust_level: relationship.score / 10.0, // Simplified trust calculation
                interaction_count: relationship.total_interactions,
                last_interaction: relationship.last_interaction.unwrap_or(relationship.created_at),
                status: relationship.status.clone(),
            });
        }

        Ok(result)
    }
}

#[derive(Debug, Clone, Serialize)]
pub struct RelationshipStats {
    pub total_relationships: usize,
    pub active_relationships: usize,
    pub transmission_enabled: usize,
    pub broken_relationships: usize,
    pub avg_score: f64,
}

#[derive(Debug, Clone, Serialize)]
pub struct RelationshipCompact {
    pub score: f64,
    pub trust_level: f64,
    pub interaction_count: u32,
    pub last_interaction: DateTime<Utc>,
    pub status: RelationshipStatus,
}
458 src/scheduler.rs Normal file
@@ -0,0 +1,458 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc, Duration};

use crate::config::Config;
use crate::persona::Persona;
use crate::transmission::TransmissionController;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScheduledTask {
    pub id: String,
    pub task_type: TaskType,
    pub next_run: DateTime<Utc>,
    pub interval_hours: Option<i64>,
    pub enabled: bool,
    pub last_run: Option<DateTime<Utc>>,
    pub run_count: u32,
    pub max_runs: Option<u32>,
    pub created_at: DateTime<Utc>,
    pub metadata: HashMap<String, String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TaskType {
    DailyMaintenance,
    AutoTransmission,
    RelationshipDecay,
    BreakthroughCheck,
    MaintenanceTransmission,
    Custom(String),
}

impl std::fmt::Display for TaskType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            TaskType::DailyMaintenance => write!(f, "daily_maintenance"),
            TaskType::AutoTransmission => write!(f, "auto_transmission"),
            TaskType::RelationshipDecay => write!(f, "relationship_decay"),
            TaskType::BreakthroughCheck => write!(f, "breakthrough_check"),
            TaskType::MaintenanceTransmission => write!(f, "maintenance_transmission"),
            TaskType::Custom(name) => write!(f, "custom_{}", name),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskExecution {
    pub task_id: String,
    pub execution_time: DateTime<Utc>,
    pub duration_ms: u64,
    pub success: bool,
    pub result: Option<String>,
    pub error: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIScheduler {
    config: Config,
    tasks: HashMap<String, ScheduledTask>,
    execution_history: Vec<TaskExecution>,
    last_check: Option<DateTime<Utc>>,
}

impl AIScheduler {
    pub fn new(config: &Config) -> Result<Self> {
        let (tasks, execution_history) = Self::load_scheduler_data(config)?;

        let mut scheduler = AIScheduler {
            config: config.clone(),
            tasks,
            execution_history,
            last_check: None,
        };

        // Initialize default tasks if none exist
        if scheduler.tasks.is_empty() {
            scheduler.create_default_tasks()?;
        }

        Ok(scheduler)
    }
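    // A task is due when it is enabled, its next_run is in the past, and it has
    // not exhausted max_runs (None means unlimited). On success a recurring task
    // is rescheduled interval_hours after the run started; on failure it retries
    // after 15 minutes. One-shot tasks are disabled once they run.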
pub async fn run_scheduled_tasks(&mut self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<Vec<TaskExecution>> {
|
||||
let now = Utc::now();
|
||||
let mut executions = Vec::new();
|
||||
|
||||
// Find tasks that are due to run
|
||||
let due_task_ids: Vec<String> = self.tasks
|
||||
.iter()
|
||||
.filter(|(_, task)| task.enabled && task.next_run <= now)
|
||||
.filter(|(_, task)| {
|
||||
// Check if task hasn't exceeded max runs
|
||||
if let Some(max_runs) = task.max_runs {
|
||||
task.run_count < max_runs
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.map(|(id, _)| id.clone())
|
||||
.collect();
|
||||
|
||||
for task_id in due_task_ids {
|
||||
let execution = self.execute_task(&task_id, persona, transmission_controller).await?;
|
||||
executions.push(execution);
|
||||
}
|
||||
|
||||
self.last_check = Some(now);
|
||||
self.save_scheduler_data()?;
|
||||
|
||||
Ok(executions)
|
||||
}
|
||||
|
||||
async fn execute_task(&mut self, task_id: &str, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<TaskExecution> {
|
||||
let start_time = Utc::now();
|
||||
let mut execution = TaskExecution {
|
||||
task_id: task_id.to_string(),
|
||||
execution_time: start_time,
|
||||
duration_ms: 0,
|
||||
success: false,
|
||||
result: None,
|
||||
error: None,
|
||||
};
|
||||
|
||||
// Get task type without borrowing mutably
|
||||
let task_type = {
|
||||
let task = self.tasks.get(task_id)
|
||||
.ok_or_else(|| anyhow::anyhow!("Task not found: {}", task_id))?;
|
||||
task.task_type.clone()
|
||||
};
|
||||
|
||||
// Execute the task based on its type
|
||||
let result = match &task_type {
|
||||
TaskType::DailyMaintenance => self.execute_daily_maintenance(persona, transmission_controller).await,
|
||||
TaskType::AutoTransmission => self.execute_auto_transmission(persona, transmission_controller).await,
|
||||
TaskType::RelationshipDecay => self.execute_relationship_decay(persona).await,
|
||||
TaskType::BreakthroughCheck => self.execute_breakthrough_check(persona, transmission_controller).await,
|
||||
TaskType::MaintenanceTransmission => self.execute_maintenance_transmission(persona, transmission_controller).await,
|
||||
TaskType::Custom(name) => self.execute_custom_task(name, persona, transmission_controller).await,
|
||||
};
|
||||
|
||||
let end_time = Utc::now();
|
||||
execution.duration_ms = (end_time - start_time).num_milliseconds() as u64;
|
||||
|
||||
// Now update the task state with mutable borrow
|
||||
match result {
|
||||
Ok(message) => {
|
||||
execution.success = true;
|
||||
execution.result = Some(message);
|
||||
|
||||
// Update task state
|
||||
if let Some(task) = self.tasks.get_mut(task_id) {
|
||||
task.last_run = Some(start_time);
|
||||
task.run_count += 1;
|
||||
|
||||
// Schedule next run if recurring
|
||||
if let Some(interval_hours) = task.interval_hours {
|
||||
task.next_run = start_time + Duration::hours(interval_hours);
|
||||
} else {
|
||||
// One-time task, disable it
|
||||
task.enabled = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
execution.error = Some(e.to_string());
|
||||
|
||||
// For failed tasks, retry in a shorter interval
|
||||
if let Some(task) = self.tasks.get_mut(task_id) {
|
||||
if task.interval_hours.is_some() {
|
||||
task.next_run = start_time + Duration::minutes(15); // Retry in 15 minutes
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.execution_history.push(execution.clone());
|
||||
|
||||
// Keep only recent execution history (last 1000 executions)
|
||||
if self.execution_history.len() > 1000 {
|
||||
self.execution_history.drain(..self.execution_history.len() - 1000);
|
||||
}
|
||||
|
||||
Ok(execution)
|
||||
}
|
||||
|
||||
async fn execute_daily_maintenance(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
|
||||
// Run daily maintenance
|
||||
persona.daily_maintenance()?;
|
||||
|
||||
// Check for maintenance transmissions
|
||||
let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
|
||||
|
||||
Ok(format!("Daily maintenance completed. {} maintenance transmissions sent.", transmissions.len()))
|
||||
}
|
||||
|
||||
async fn execute_auto_transmission(&self, _persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
|
||||
let transmissions = transmission_controller.check_autonomous_transmissions(_persona).await?;
|
||||
Ok(format!("Autonomous transmission check completed. {} transmissions sent.", transmissions.len()))
|
||||
}
|
||||
|
||||
    async fn execute_relationship_decay(&self, persona: &mut Persona) -> Result<String> {
        persona.daily_maintenance()?;
        Ok("Relationship time decay applied.".to_string())
    }

    async fn execute_breakthrough_check(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
        let transmissions = transmission_controller.check_breakthrough_transmissions(persona).await?;
        Ok(format!("Breakthrough check completed. {} transmissions sent.", transmissions.len()))
    }

    async fn execute_maintenance_transmission(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
        let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
        Ok(format!("Maintenance transmission check completed. {} transmissions sent.", transmissions.len()))
    }

    async fn execute_custom_task(&self, _name: &str, _persona: &mut Persona, _transmission_controller: &mut TransmissionController) -> Result<String> {
        // Placeholder for custom task execution
        Ok("Custom task executed.".to_string())
    }

    pub fn create_task(&mut self, task_type: TaskType, next_run: DateTime<Utc>, interval_hours: Option<i64>) -> Result<String> {
        let task_id = uuid::Uuid::new_v4().to_string();
        let now = Utc::now();

        let task = ScheduledTask {
            id: task_id.clone(),
            task_type,
            next_run,
            interval_hours,
            enabled: true,
            last_run: None,
            run_count: 0,
            max_runs: None,
            created_at: now,
            metadata: HashMap::new(),
        };

        self.tasks.insert(task_id.clone(), task);
        self.save_scheduler_data()?;

        Ok(task_id)
    }

    pub fn enable_task(&mut self, task_id: &str) -> Result<()> {
        if let Some(task) = self.tasks.get_mut(task_id) {
            task.enabled = true;
            self.save_scheduler_data()?;
        }
        Ok(())
    }

    pub fn disable_task(&mut self, task_id: &str) -> Result<()> {
        if let Some(task) = self.tasks.get_mut(task_id) {
            task.enabled = false;
            self.save_scheduler_data()?;
        }
        Ok(())
    }

    pub fn delete_task(&mut self, task_id: &str) -> Result<()> {
        self.tasks.remove(task_id);
        self.save_scheduler_data()?;
        Ok(())
    }

    pub fn get_task(&self, task_id: &str) -> Option<&ScheduledTask> {
        self.tasks.get(task_id)
    }

    pub fn get_tasks(&self) -> &HashMap<String, ScheduledTask> {
        &self.tasks
    }

    pub fn get_due_tasks(&self) -> Vec<&ScheduledTask> {
        let now = Utc::now();
        self.tasks
            .values()
            .filter(|task| task.enabled && task.next_run <= now)
            .collect()
    }

    pub fn get_execution_history(&self, limit: Option<usize>) -> Vec<&TaskExecution> {
        let mut executions: Vec<_> = self.execution_history.iter().collect();
        executions.sort_by(|a, b| b.execution_time.cmp(&a.execution_time));

        match limit {
            Some(limit) => executions.into_iter().take(limit).collect(),
            None => executions,
        }
    }

    pub fn get_scheduler_stats(&self) -> SchedulerStats {
        let total_tasks = self.tasks.len();
        let enabled_tasks = self.tasks.values().filter(|task| task.enabled).count();
        let due_tasks = self.get_due_tasks().len();

        let total_executions = self.execution_history.len();
        let successful_executions = self.execution_history.iter()
            .filter(|exec| exec.success)
            .count();

        let today = Utc::now().date_naive();
        let today_executions = self.execution_history.iter()
            .filter(|exec| exec.execution_time.date_naive() == today)
            .count();

        let avg_duration = if total_executions > 0 {
            self.execution_history.iter()
                .map(|exec| exec.duration_ms)
                .sum::<u64>() as f64 / total_executions as f64
        } else {
            0.0
        };

        SchedulerStats {
            total_tasks,
            enabled_tasks,
            due_tasks,
            total_executions,
            successful_executions,
            today_executions,
            success_rate: if total_executions > 0 {
                successful_executions as f64 / total_executions as f64
            } else {
                0.0
            },
            avg_duration_ms: avg_duration,
        }
    }

    fn create_default_tasks(&mut self) -> Result<()> {
        let now = Utc::now();

        // Daily maintenance task - run every day at 3 AM
        let mut daily_maintenance_time = now.date_naive().and_hms_opt(3, 0, 0).unwrap().and_utc();
        if daily_maintenance_time <= now {
            daily_maintenance_time = daily_maintenance_time + Duration::days(1);
        }

        self.create_task(
            TaskType::DailyMaintenance,
            daily_maintenance_time,
            Some(24), // 24 hours = 1 day
        )?;

        // Auto transmission check - every 4 hours
        self.create_task(
            TaskType::AutoTransmission,
            now + Duration::hours(1),
            Some(4),
        )?;

        // Breakthrough check - every 2 hours
        self.create_task(
            TaskType::BreakthroughCheck,
            now + Duration::minutes(30),
            Some(2),
        )?;

        // Maintenance transmission - once per day
        let mut maintenance_time = now.date_naive().and_hms_opt(12, 0, 0).unwrap().and_utc();
        if maintenance_time <= now {
            maintenance_time = maintenance_time + Duration::days(1);
        }

        self.create_task(
            TaskType::MaintenanceTransmission,
            maintenance_time,
            Some(24), // 24 hours = 1 day
        )?;

        Ok(())
    }

    fn load_scheduler_data(config: &Config) -> Result<(HashMap<String, ScheduledTask>, Vec<TaskExecution>)> {
        let tasks_file = config.scheduler_tasks_file();
        let history_file = config.scheduler_history_file();

        let tasks = if tasks_file.exists() {
            let content = std::fs::read_to_string(tasks_file)
                .context("Failed to read scheduler tasks file")?;
            serde_json::from_str(&content)
                .context("Failed to parse scheduler tasks file")?
        } else {
            HashMap::new()
        };

        let history = if history_file.exists() {
            let content = std::fs::read_to_string(history_file)
                .context("Failed to read scheduler history file")?;
            serde_json::from_str(&content)
                .context("Failed to parse scheduler history file")?
        } else {
            Vec::new()
        };

        Ok((tasks, history))
    }

    fn save_scheduler_data(&self) -> Result<()> {
        // Save tasks
        let tasks_content = serde_json::to_string_pretty(&self.tasks)
            .context("Failed to serialize scheduler tasks")?;
        std::fs::write(&self.config.scheduler_tasks_file(), tasks_content)
            .context("Failed to write scheduler tasks file")?;

        // Save execution history
        let history_content = serde_json::to_string_pretty(&self.execution_history)
            .context("Failed to serialize scheduler history")?;
        std::fs::write(&self.config.scheduler_history_file(), history_content)
            .context("Failed to write scheduler history file")?;

        Ok(())
    }
}

// Type alias for compatibility with CLI interface
pub type Scheduler = AIScheduler;

impl Scheduler {
    pub fn list_tasks(&self) -> Result<Vec<ScheduledTaskInfo>> {
        let tasks: Vec<ScheduledTaskInfo> = self.tasks
            .values()
            .map(|task| ScheduledTaskInfo {
                name: task.task_type.to_string(),
                schedule: match task.interval_hours {
                    Some(hours) => format!("Every {} hours", hours),
                    None => "One-time".to_string(),
                },
                next_run: task.next_run,
                enabled: task.enabled,
            })
            .collect();

        Ok(tasks)
    }
}

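/// Aggregate counters reported by AIScheduler::get_scheduler_stats().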
#[derive(Debug, Clone)]
pub struct SchedulerStats {
    pub total_tasks: usize,
    pub enabled_tasks: usize,
    pub due_tasks: usize,
    pub total_executions: usize,
    pub successful_executions: usize,
    pub today_executions: usize,
    pub success_rate: f64,
    pub avg_duration_ms: f64,
}

#[derive(Debug, Clone)]
pub struct ScheduledTaskInfo {
    pub name: String,
    pub schedule: String,
    pub next_run: DateTime<Utc>,
    pub enabled: bool,
}

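A minimal driver sketch for the scheduler above (assumes a tokio runtime and already-initialized Config, Persona, and TransmissionController values; `run_scheduler_once` is a hypothetical helper name, not part of this commit):

// Run one scheduler pass; a daemon would call this in a loop.
async fn run_scheduler_once(config: &Config, persona: &mut Persona, tc: &mut TransmissionController) -> anyhow::Result<()> {
    let mut scheduler = AIScheduler::new(config)?;
    for exec in scheduler.run_scheduled_tasks(persona, tc).await? {
        println!("{}: success={} ({} ms)", exec.task_id, exec.success, exec.duration_ms);
    }
    Ok(())
}
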
608
src/shell.rs
Normal file
@@ -0,0 +1,608 @@
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::io::{self, Write};
use anyhow::{Result, Context};
use colored::*;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use rustyline::completion::{Completer, FilenameCompleter, Pair};
use rustyline::history::{History, DefaultHistory};
use rustyline::highlight::Highlighter;
use rustyline::hint::Hinter;
use rustyline::validate::Validator;
use rustyline::Helper;

use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIProvider, AIConfig};

pub async fn handle_shell(
    user_id: String,
    data_dir: Option<PathBuf>,
    model: Option<String>,
    provider: Option<String>,
) -> Result<()> {
    let config = Config::new(data_dir)?;

    let mut shell = ShellMode::new(config, user_id)?
        .with_ai_provider(provider, model);

    shell.run().await
}

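/// Interactive shell state: persona, optional AI provider, and the rustyline editor.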
pub struct ShellMode {
    config: Config,
    persona: Persona,
    ai_provider: Option<AIProviderClient>,
    user_id: String,
    editor: Editor<ShellCompleter, DefaultHistory>,
}

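/// Completer for /slash commands and !shell commands, falling back to filename completion.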
struct ShellCompleter {
    completer: FilenameCompleter,
}

impl ShellCompleter {
    fn new() -> Self {
        ShellCompleter {
            completer: FilenameCompleter::new(),
        }
    }
}

impl Helper for ShellCompleter {}

impl Hinter for ShellCompleter {
    type Hint = String;

    fn hint(&self, _line: &str, _pos: usize, _ctx: &rustyline::Context<'_>) -> Option<String> {
        None
    }
}

impl Highlighter for ShellCompleter {}

impl Validator for ShellCompleter {}

impl Completer for ShellCompleter {
    type Candidate = Pair;

    fn complete(
        &self,
        line: &str,
        pos: usize,
        ctx: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<Pair>)> {
        // Custom completion for slash commands
        if line.starts_with('/') {
            let commands = vec![
                "/status", "/relationships", "/memories", "/analyze",
                "/fortune", "/clear", "/history", "/help", "/exit"
            ];

            let word_start = line.rfind(' ').map_or(0, |i| i + 1);
            let word = &line[word_start..pos];

            let matches: Vec<Pair> = commands.iter()
                .filter(|cmd| cmd.starts_with(word))
                .map(|cmd| Pair {
                    display: cmd.to_string(),
                    replacement: cmd.to_string(),
                })
                .collect();

            return Ok((word_start, matches));
        }

        // Custom completion for shell commands starting with !
        if line.starts_with('!') {
            let shell_commands = vec![
                "ls", "pwd", "cd", "cat", "grep", "find", "ps", "top",
                "echo", "mkdir", "rmdir", "cp", "mv", "rm", "touch",
                "git", "cargo", "npm", "python", "node"
            ];

            let word_start = line.rfind(' ').map_or(1, |i| i + 1); // Skip the '!'
            let word = &line[word_start..pos];

            let matches: Vec<Pair> = shell_commands.iter()
                .filter(|cmd| cmd.starts_with(word))
                .map(|cmd| Pair {
                    display: cmd.to_string(),
                    replacement: cmd.to_string(),
                })
                .collect();

            return Ok((word_start, matches));
        }

        // Fallback to filename completion
        self.completer.complete(line, pos, ctx)
    }
}

impl ShellMode {
    pub fn new(config: Config, user_id: String) -> Result<Self> {
        let persona = Persona::new(&config)?;

        // Setup rustyline editor with completer
        let completer = ShellCompleter::new();
        let mut editor = Editor::with_config(
            rustyline::Config::builder()
                .tab_stop(4)
                .build()
        )?;
        editor.set_helper(Some(completer));

        // Load history if exists
        let history_file = config.data_dir.join("shell_history.txt");
        if history_file.exists() {
            let _ = editor.load_history(&history_file);
        }

        Ok(ShellMode {
            config,
            persona,
            ai_provider: None,
            user_id,
            editor,
        })
    }

    pub fn with_ai_provider(mut self, provider: Option<String>, model: Option<String>) -> Self {
        // Use provided parameters or fall back to config defaults
        let provider_name = provider
            .or_else(|| Some(self.config.default_provider.clone()))
            .unwrap_or_else(|| "ollama".to_string());

        let model_name = model.or_else(|| {
            // Try to get default model from config for the chosen provider
            self.config.providers.get(&provider_name)
                .map(|p| p.default_model.clone())
        }).unwrap_or_else(|| {
            // Final fallback based on provider
            match provider_name.as_str() {
                "openai" => "gpt-4o-mini".to_string(),
                "ollama" => "qwen2.5-coder:latest".to_string(),
                _ => "qwen2.5-coder:latest".to_string(),
            }
        });

        let ai_provider = match provider_name.as_str() {
            "ollama" => AIProvider::Ollama,
            "openai" => AIProvider::OpenAI,
            "claude" => AIProvider::Claude,
            _ => AIProvider::Ollama, // Default fallback
        };

        let ai_config = AIConfig {
            provider: ai_provider,
            model: model_name,
            api_key: None, // Will be loaded from environment if needed
            base_url: None,
            max_tokens: Some(2000),
            temperature: Some(0.7),
        };

        let client = AIProviderClient::new(ai_config);
        self.ai_provider = Some(client);

        self
    }

    pub async fn run(&mut self) -> Result<()> {
        println!("{}", "🚀 Starting ai.gpt Interactive Shell".cyan().bold());

        // Show AI provider info
        if let Some(ai_provider) = &self.ai_provider {
            println!("{}: {} ({})",
                "AI Provider".green().bold(),
                ai_provider.get_provider().to_string(),
                ai_provider.get_model());
        } else {
            println!("{}: {}", "AI Provider".yellow().bold(), "Simple mode (no AI)");
        }

        println!("{}", "Type 'help' for commands, 'exit' to quit".dimmed());
        println!("{}", "Use Tab for command completion, Ctrl+C to interrupt, Ctrl+D to exit".dimmed());

        loop {
            // Read user input with rustyline (supports completion, history, etc.)
            let readline = self.editor.readline("ai.shell> ");

            match readline {
                Ok(line) => {
                    let input = line.trim();

                    // Skip empty input
                    if input.is_empty() {
                        continue;
                    }

                    // Add to history
                    self.editor.add_history_entry(input)
                        .context("Failed to add to history")?;

                    // Handle input
                    if let Err(e) = self.handle_input(input).await {
                        println!("{}: {}", "Error".red().bold(), e);
                    }
                }
                Err(ReadlineError::Interrupted) => {
                    // Ctrl+C
                    println!("{}", "Use 'exit' or Ctrl+D to quit".yellow());
                    continue;
                }
                Err(ReadlineError::Eof) => {
                    // Ctrl+D
                    println!("\n{}", "Goodbye!".cyan());
                    break;
                }
                Err(err) => {
                    println!("{}: {}", "Input error".red().bold(), err);
                    break;
                }
            }
        }

        // Save history before exit
        self.save_history()?;

        Ok(())
    }

    async fn handle_input(&mut self, input: &str) -> Result<()> {
        match input {
            // Exit commands
            "exit" | "quit" | "/exit" | "/quit" => {
                println!("{}", "Goodbye!".cyan());
                std::process::exit(0);
            }
            // Help command
            "help" | "/help" => {
                self.show_help();
            }
            // Shell commands (starting with !)
            input if input.starts_with('!') => {
                self.execute_shell_command(&input[1..]).await?;
            }
            // Slash commands (starting with /)
            input if input.starts_with('/') => {
                self.execute_slash_command(input).await?;
            }
            // AI conversation
            _ => {
                self.handle_ai_conversation(input).await?;
            }
        }

        Ok(())
    }

    fn show_help(&self) {
        println!("\n{}", "ai.gpt Interactive Shell Commands".cyan().bold());
        println!();

        println!("{}", "Navigation & Input:".yellow().bold());
        println!("  {} - Tab completion for commands and files", "Tab".green());
        println!("  {} - Command history (previous/next)", "↑/↓ or Ctrl+P/N".green());
        println!("  {} - Interrupt current input", "Ctrl+C".green());
        println!("  {} - Exit shell", "Ctrl+D".green());
        println!();

        println!("{}", "Basic Commands:".yellow().bold());
        println!("  {} - Show this help", "help".green());
        println!("  {} - Exit the shell", "exit, quit".green());
        println!("  {} - Clear screen", "/clear".green());
        println!("  {} - Show command history", "/history".green());
        println!();

        println!("{}", "Shell Commands:".yellow().bold());
        println!("  {} - Execute shell command (Tab completion)", "!<command>".green());
        println!("  {} - List files", "!ls".green());
        println!("  {} - Show current directory", "!pwd".green());
        println!("  {} - Git status", "!git status".green());
        println!("  {} - Cargo build", "!cargo build".green());
        println!();

        println!("{}", "AI Commands:".yellow().bold());
        println!("  {} - Show AI status and relationship", "/status".green());
        println!("  {} - List all relationships", "/relationships".green());
        println!("  {} - Show recent memories", "/memories".green());
        println!("  {} - Analyze current directory", "/analyze".green());
        println!("  {} - Show today's fortune", "/fortune".green());
        println!();

        println!("{}", "Conversation:".yellow().bold());
        println!("  {} - Chat with AI using configured provider", "Any other input".green());
        println!("  {} - AI responses track relationship changes", "Relationship tracking".dimmed());
        println!();
    }

    async fn execute_shell_command(&self, command: &str) -> Result<()> {
        println!("{} {}", "Executing:".blue().bold(), command.yellow());

        let output = if cfg!(target_os = "windows") {
            Command::new("cmd")
                .args(["/C", command])
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .output()
                .context("Failed to execute command")?
        } else {
            Command::new("sh")
                .args(["-c", command])
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .output()
                .context("Failed to execute command")?
        };

        // Print stdout
        if !output.stdout.is_empty() {
            let stdout = String::from_utf8_lossy(&output.stdout);
            println!("{}", stdout);
        }

        // Print stderr in red
        if !output.stderr.is_empty() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            println!("{}", stderr.red());
        }

        // Show exit code if not successful
        if !output.status.success() {
            if let Some(code) = output.status.code() {
                println!("{}: {}", "Exit code".red().bold(), code);
            }
        }

        Ok(())
    }

    async fn execute_slash_command(&mut self, command: &str) -> Result<()> {
        match command {
            "/status" => {
                self.show_ai_status().await?;
            }
            "/relationships" => {
                self.show_relationships().await?;
            }
            "/memories" => {
                self.show_memories().await?;
            }
            "/analyze" => {
                self.analyze_directory().await?;
            }
            "/fortune" => {
                self.show_fortune().await?;
            }
            "/clear" => {
                // Clear screen
                print!("\x1B[2J\x1B[1;1H");
                io::stdout().flush()?;
            }
            "/history" => {
                self.show_history();
            }
            _ => {
                println!("{}: {}", "Unknown command".red().bold(), command);
                println!("Type '{}' for available commands", "help".green());
            }
        }

        Ok(())
    }

    async fn handle_ai_conversation(&mut self, input: &str) -> Result<()> {
        let (response, relationship_delta) = if let Some(ai_provider) = &self.ai_provider {
            // Use AI provider for response
            self.persona.process_ai_interaction(&self.user_id, input,
                Some(ai_provider.get_provider().to_string()),
                Some(ai_provider.get_model().to_string())).await?
        } else {
            // Use simple response
            self.persona.process_interaction(&self.user_id, input)?
        };

        // Display conversation
        println!("{}: {}", "You".cyan().bold(), input);
        println!("{}: {}", "AI".green().bold(), response);

        // Show relationship change if significant
        if relationship_delta.abs() >= 0.1 {
            if relationship_delta > 0.0 {
                println!("{}", format!("(+{:.2} relationship)", relationship_delta).green());
            } else {
                println!("{}", format!("({:.2} relationship)", relationship_delta).red());
            }
        }

        println!(); // Add spacing

        Ok(())
    }

    async fn show_ai_status(&self) -> Result<()> {
        let state = self.persona.get_current_state()?;

        println!("\n{}", "AI Status".cyan().bold());
        println!("Mood: {}", state.current_mood.yellow());
        println!("Fortune: {}/10", state.fortune_value.to_string().yellow());

        if let Some(relationship) = self.persona.get_relationship(&self.user_id) {
            println!("\n{}", "Your Relationship".cyan().bold());
            println!("Status: {}", relationship.status.to_string().yellow());
            println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
            println!("Interactions: {}", relationship.total_interactions);
        }

        println!();
        Ok(())
    }

    async fn show_relationships(&self) -> Result<()> {
        let relationships = self.persona.list_all_relationships();

        if relationships.is_empty() {
            println!("{}", "No relationships yet".yellow());
            return Ok(());
        }

        println!("\n{}", "All Relationships".cyan().bold());
        println!();

        for (user_id, rel) in relationships {
            let transmission = if rel.is_broken {
                "💔"
            } else if rel.transmission_enabled {
                "✓"
            } else {
                "✗"
            };

            let user_display = if user_id.len() > 20 {
                format!("{}...", &user_id[..20])
            } else {
                user_id
            };

            println!("{:<25} {:<12} {:<8} {}",
                user_display.cyan(),
                rel.status.to_string(),
                format!("{:.2}", rel.score),
                transmission);
        }

        println!();
        Ok(())
    }

    async fn show_memories(&mut self) -> Result<()> {
        let memories = self.persona.get_memories(&self.user_id, 10);

        if memories.is_empty() {
            println!("{}", "No memories yet".yellow());
            return Ok(());
        }

        println!("\n{}", "Recent Memories".cyan().bold());
        println!();

        for (i, memory) in memories.iter().enumerate() {
            println!("{}: {}",
                format!("Memory {}", i + 1).dimmed(),
                memory);
            println!();
        }

        Ok(())
    }

    async fn analyze_directory(&self) -> Result<()> {
        println!("{}", "Analyzing current directory...".blue().bold());

        // Get current directory
        let current_dir = std::env::current_dir()
            .context("Failed to get current directory")?;

        println!("Directory: {}", current_dir.display().to_string().yellow());

        // List files and directories
        let entries = std::fs::read_dir(&current_dir)
            .context("Failed to read directory")?;

        let mut files = Vec::new();
        let mut dirs = Vec::new();

        for entry in entries {
            let entry = entry.context("Failed to read directory entry")?;
            let path = entry.path();
            let name = path.file_name()
                .and_then(|n| n.to_str())
                .unwrap_or("Unknown");

            if path.is_dir() {
                dirs.push(name.to_string());
            } else {
                files.push(name.to_string());
            }
        }

        if !dirs.is_empty() {
            println!("\n{}: {}", "Directories".blue().bold(), dirs.join(", "));
        }

        if !files.is_empty() {
            println!("{}: {}", "Files".blue().bold(), files.join(", "));
        }

        // Check for common project files
        let project_files = ["Cargo.toml", "package.json", "requirements.txt", "Makefile", "README.md"];
        let found_files: Vec<_> = project_files.iter()
            .filter(|&&file| files.contains(&file.to_string()))
            .collect();

        if !found_files.is_empty() {
            println!("\n{}: {}", "Project files detected".green().bold(),
                found_files.iter().map(|s| s.to_string()).collect::<Vec<_>>().join(", "));
        }

        println!();
        Ok(())
    }

    async fn show_fortune(&self) -> Result<()> {
        let state = self.persona.get_current_state()?;

        let fortune_stars = "🌟".repeat(state.fortune_value as usize);
        let empty_stars = "☆".repeat((10 - state.fortune_value) as usize);

        println!("\n{}", "AI Fortune".yellow().bold());
        println!("{}{}", fortune_stars, empty_stars);
        println!("Today's Fortune: {}/10", state.fortune_value);

        if state.breakthrough_triggered {
            println!("{}", "⚡ BREAKTHROUGH! Special fortune activated!".yellow());
        }

        println!();
        Ok(())
    }

    fn show_history(&self) {
        println!("\n{}", "Command History".cyan().bold());

        let history = self.editor.history();
        if history.is_empty() {
            println!("{}", "No commands in history".yellow());
            return;
        }

        // Show last 20 commands
        let start = if history.len() > 20 { history.len() - 20 } else { 0 };
        for (i, entry) in history.iter().enumerate().skip(start) {
            println!("{:2}: {}", i + 1, entry);
        }

        println!();
    }

    fn save_history(&mut self) -> Result<()> {
        let history_file = self.config.data_dir.join("shell_history.txt");
        self.editor.save_history(&history_file)
            .context("Failed to save shell history")?;
        Ok(())
    }
}

// Helper to render AIProvider as its lowercase provider name
impl AIProvider {
    fn to_string(&self) -> String {
        match self {
            AIProvider::OpenAI => "openai".to_string(),
            AIProvider::Ollama => "ollama".to_string(),
            AIProvider::Claude => "claude".to_string(),
        }
    }
}

51
src/status.rs
Normal file
@@ -0,0 +1,51 @@
use std::path::PathBuf;
use anyhow::Result;
use colored::*;

use crate::config::Config;
use crate::persona::Persona;

pub async fn handle_status(user_id: Option<String>, data_dir: Option<PathBuf>) -> Result<()> {
    // Load configuration
    let config = Config::new(data_dir)?;

    // Initialize persona
    let persona = Persona::new(&config)?;

    // Get current state
    let state = persona.get_current_state()?;

    // Display AI status
    println!("{}", "ai.gpt Status".cyan().bold());
    println!("Mood: {}", state.current_mood);
    println!("Fortune: {}/10", state.fortune_value);

    if state.breakthrough_triggered {
        println!("{}", "⚡ Breakthrough triggered!".yellow());
    }

    // Show personality traits
    println!("\n{}", "Current Personality".cyan().bold());
    for (trait_name, value) in &state.base_personality {
        println!("{}: {:.2}", trait_name.cyan(), value);
    }

    // Show specific relationship if requested
    if let Some(user_id) = user_id {
        if let Some(relationship) = persona.get_relationship(&user_id) {
            println!("\n{}: {}", "Relationship with".cyan(), user_id);
            println!("Status: {}", relationship.status);
            println!("Score: {:.2}", relationship.score);
            println!("Total Interactions: {}", relationship.total_interactions);
            println!("Transmission Enabled: {}", relationship.transmission_enabled);

            if relationship.is_broken {
                println!("{}", "⚠️ This relationship is broken and cannot be repaired.".red());
            }
        } else {
            println!("\n{}: {}", "No relationship found with".yellow(), user_id);
        }
    }

    Ok(())
}

480
src/submodules.rs
Normal file
@@ -0,0 +1,480 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};

use crate::config::Config;

pub async fn handle_submodules(
    action: String,
    module: Option<String>,
    all: bool,
    dry_run: bool,
    auto_commit: bool,
    verbose: bool,
    data_dir: Option<PathBuf>,
) -> Result<()> {
    let config = Config::new(data_dir)?;
    let mut submodule_manager = SubmoduleManager::new(config);

    match action.as_str() {
        "list" => {
            submodule_manager.list_submodules(verbose).await?;
        }
        "update" => {
            submodule_manager.update_submodules(module, all, dry_run, auto_commit, verbose).await?;
        }
        "status" => {
            submodule_manager.show_submodule_status().await?;
        }
        _ => {
            return Err(anyhow::anyhow!("Unknown submodule action: {}", action));
        }
    }

    Ok(())
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubmoduleInfo {
    pub name: String,
    pub path: String,
    pub branch: String,
    pub current_commit: Option<String>,
    pub target_commit: Option<String>,
    pub status: String,
}

impl Default for SubmoduleInfo {
    fn default() -> Self {
        SubmoduleInfo {
            name: String::new(),
            path: String::new(),
            branch: "main".to_string(),
            current_commit: None,
            target_commit: None,
            status: "unknown".to_string(),
        }
    }
}

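/// Runs git operations for the repository's submodules rooted at ~/ai/ai.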
#[allow(dead_code)]
pub struct SubmoduleManager {
    config: Config,
    ai_root: PathBuf,
    submodules: HashMap<String, SubmoduleInfo>,
}

impl SubmoduleManager {
    pub fn new(config: Config) -> Self {
        let ai_root = dirs::home_dir()
            .unwrap_or_else(|| PathBuf::from("."))
            .join("ai")
            .join("ai");

        SubmoduleManager {
            config,
            ai_root,
            submodules: HashMap::new(),
        }
    }

    pub async fn list_submodules(&mut self, verbose: bool) -> Result<()> {
        println!("{}", "📋 Submodules Status".cyan().bold());
        println!();

        let submodules = self.parse_gitmodules()?;

        if submodules.is_empty() {
            println!("{}", "No submodules found".yellow());
            return Ok(());
        }

        // Display submodules in a table format
        println!("{:<15} {:<25} {:<15} {}",
            "Module".cyan().bold(),
            "Path".cyan().bold(),
            "Branch".cyan().bold(),
            "Status".cyan().bold());
        println!("{}", "-".repeat(80));

        for (module_name, module_info) in &submodules {
            let status_color = match module_info.status.as_str() {
                "clean" => module_info.status.green(),
                "modified" => module_info.status.yellow(),
                "missing" => module_info.status.red(),
                "conflicts" => module_info.status.red(),
                _ => module_info.status.normal(),
            };

            println!("{:<15} {:<25} {:<15} {}",
                module_name.blue(),
                module_info.path,
                module_info.branch.green(),
                status_color);
        }

        println!();

        if verbose {
            println!("Total submodules: {}", submodules.len().to_string().cyan());
            println!("Repository root: {}", self.ai_root.display().to_string().blue());
        }

        Ok(())
    }

    pub async fn update_submodules(
        &mut self,
        module: Option<String>,
        all: bool,
        dry_run: bool,
        auto_commit: bool,
        verbose: bool
    ) -> Result<()> {
        if module.is_none() && !all {
            return Err(anyhow::anyhow!("Either --module or --all is required"));
        }

        if module.is_some() && all {
            return Err(anyhow::anyhow!("Cannot use both --module and --all"));
        }

        let submodules = self.parse_gitmodules()?;

        if submodules.is_empty() {
            println!("{}", "No submodules found".yellow());
            return Ok(());
        }

        // Determine which modules to update
        let modules_to_update: Vec<String> = if all {
            submodules.keys().cloned().collect()
        } else if let Some(module_name) = module {
            if !submodules.contains_key(&module_name) {
                return Err(anyhow::anyhow!(
                    "Submodule '{}' not found. Available modules: {}",
                    module_name,
                    submodules.keys().cloned().collect::<Vec<_>>().join(", ")
                ));
            }
            vec![module_name]
        } else {
            vec![]
        };

        if dry_run {
            println!("{}", "🔍 DRY RUN MODE - No changes will be made".yellow().bold());
        }

        println!("{}", format!("🔄 Updating {} submodule(s)...", modules_to_update.len()).cyan().bold());

        let mut updated_modules = Vec::new();

        for module_name in modules_to_update {
            if let Some(module_info) = submodules.get(&module_name) {
                println!("\n{}", format!("📦 Processing: {}", module_name).blue().bold());

                let module_path = PathBuf::from(&module_info.path);
                let full_path = self.ai_root.join(&module_path);

                if !full_path.exists() {
                    println!("{}", format!("❌ Module directory not found: {}", module_info.path).red());
                    continue;
                }

                // Get current commit
                let current_commit = self.get_current_commit(&full_path)?;

                if dry_run {
                    println!("{}", format!("🔍 Would update {} to branch {}", module_name, module_info.branch).yellow());
                    if let Some(ref commit) = current_commit {
                        println!("{}", format!("Current: {}", commit).dimmed());
                    }
                    continue;
                }

                // Perform update
                if let Err(e) = self.update_single_module(&module_name, &module_info, &full_path).await {
                    println!("{}", format!("❌ Failed to update {}: {}", module_name, e).red());
                    continue;
                }

                // Get new commit
                let new_commit = self.get_current_commit(&full_path)?;

                if current_commit != new_commit {
                    println!("{}", format!("✅ Updated {} ({:?} → {:?})",
                        module_name,
                        current_commit.as_deref().unwrap_or("unknown"),
                        new_commit.as_deref().unwrap_or("unknown")).green());
                    updated_modules.push((module_name.clone(), current_commit, new_commit));
                } else {
                    println!("{}", "✅ Already up to date".green());
                }
            }
        }

        // Summary
        if !updated_modules.is_empty() {
            println!("\n{}", format!("🎉 Successfully updated {} module(s)", updated_modules.len()).green().bold());

            if verbose {
                for (module_name, old_commit, new_commit) in &updated_modules {
                    println!("  • {}: {:?} → {:?}",
                        module_name,
                        old_commit.as_deref().unwrap_or("unknown"),
                        new_commit.as_deref().unwrap_or("unknown"));
                }
            }

            if auto_commit && !dry_run {
                self.auto_commit_changes(&updated_modules).await?;
            } else if !dry_run {
                println!("{}", "💾 Changes staged but not committed".yellow());
                println!("Run with --auto-commit to commit automatically");
            }
        } else if !dry_run {
            println!("{}", "No modules needed updating".yellow());
        }

        Ok(())
    }

    pub async fn show_submodule_status(&self) -> Result<()> {
        println!("{}", "📊 Submodule Status Overview".cyan().bold());
        println!();

        let submodules = self.parse_gitmodules()?;
        let mut total_modules = 0;
        let mut clean_modules = 0;
        let mut modified_modules = 0;
        let mut missing_modules = 0;

        for (module_name, module_info) in submodules {
            let module_path = self.ai_root.join(&module_info.path);

            if module_path.exists() {
                total_modules += 1;
                match module_info.status.as_str() {
                    "clean" => clean_modules += 1,
                    "modified" => modified_modules += 1,
                    _ => {}
                }
            } else {
                missing_modules += 1;
            }

            println!("{}: {}",
                module_name.blue(),
                if module_path.exists() {
                    module_info.status.green()
                } else {
                    "missing".red()
                });
        }

        println!();
        println!("Summary: {} total, {} clean, {} modified, {} missing",
            total_modules.to_string().cyan(),
            clean_modules.to_string().green(),
            modified_modules.to_string().yellow(),
            missing_modules.to_string().red());

        Ok(())
    }

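    /// Minimal .gitmodules parser: only [submodule "name"] headers and "path = ..." lines are read.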
    fn parse_gitmodules(&self) -> Result<HashMap<String, SubmoduleInfo>> {
        let gitmodules_path = self.ai_root.join(".gitmodules");

        if !gitmodules_path.exists() {
            return Ok(HashMap::new());
        }

        let content = std::fs::read_to_string(&gitmodules_path)
            .with_context(|| format!("Failed to read .gitmodules file: {}", gitmodules_path.display()))?;

        let mut submodules = HashMap::new();
        let mut current_name: Option<String> = None;
        let mut current_path: Option<String> = None;

        for line in content.lines() {
            let line = line.trim();

            if line.starts_with("[submodule \"") && line.ends_with("\"]") {
                // Save previous submodule if complete
                if let (Some(name), Some(path)) = (current_name.take(), current_path.take()) {
                    let mut info = SubmoduleInfo::default();
                    info.name = name.clone();
                    info.path = path;
                    info.branch = self.get_target_branch(&name);
                    info.status = self.get_submodule_status(&name, &info.path)?;
                    submodules.insert(name, info);
                }

                // Extract new submodule name
                current_name = Some(line[12..line.len()-2].to_string());
            } else if line.starts_with("path = ") {
                current_path = Some(line[7..].to_string());
            }
        }

        // Save last submodule
        if let (Some(name), Some(path)) = (current_name, current_path) {
            let mut info = SubmoduleInfo::default();
            info.name = name.clone();
            info.path = path;
            info.branch = self.get_target_branch(&name);
            info.status = self.get_submodule_status(&name, &info.path)?;
            submodules.insert(name, info);
        }

        Ok(submodules)
    }

    fn get_target_branch(&self, module_name: &str) -> String {
        // TODO: read this from the ai.json configuration; currently every known module tracks main
        match module_name {
            "verse" => "main".to_string(),
            "card" => "main".to_string(),
            "bot" => "main".to_string(),
            _ => "main".to_string(),
        }
    }

    fn get_submodule_status(&self, _module_name: &str, module_path: &str) -> Result<String> {
        let full_path = self.ai_root.join(module_path);

        if !full_path.exists() {
            return Ok("missing".to_string());
        }

        // Check git status
        let output = std::process::Command::new("git")
            .args(&["submodule", "status", module_path])
            .current_dir(&self.ai_root)
            .output();

        match output {
            Ok(output) if output.status.success() => {
                let stdout = String::from_utf8_lossy(&output.stdout);
                if let Some(status_char) = stdout.chars().next() {
                    match status_char {
                        ' ' => Ok("clean".to_string()),
                        '+' => Ok("modified".to_string()),
                        '-' => Ok("not_initialized".to_string()),
                        'U' => Ok("conflicts".to_string()),
                        _ => Ok("unknown".to_string()),
                    }
                } else {
                    Ok("unknown".to_string())
                }
            }
            _ => Ok("unknown".to_string())
        }
    }

    fn get_current_commit(&self, module_path: &PathBuf) -> Result<Option<String>> {
        let output = std::process::Command::new("git")
            .args(&["rev-parse", "HEAD"])
            .current_dir(module_path)
            .output();

        match output {
            Ok(output) if output.status.success() => {
                let commit = String::from_utf8_lossy(&output.stdout).trim().to_string();
                if commit.len() >= 8 {
                    Ok(Some(commit[..8].to_string()))
                } else {
                    Ok(Some(commit))
                }
            }
            _ => Ok(None)
        }
    }

    async fn update_single_module(
        &self,
        _module_name: &str,
        module_info: &SubmoduleInfo,
        module_path: &PathBuf
    ) -> Result<()> {
        // Fetch latest changes
        println!("{}", "Fetching latest changes...".dimmed());
        let fetch_output = std::process::Command::new("git")
            .args(&["fetch", "origin"])
            .current_dir(module_path)
            .output()?;

        if !fetch_output.status.success() {
            return Err(anyhow::anyhow!("Failed to fetch: {}",
                String::from_utf8_lossy(&fetch_output.stderr)));
        }

        // Switch to target branch
        println!("{}", format!("Switching to branch {}...", module_info.branch).dimmed());
        let checkout_output = std::process::Command::new("git")
            .args(&["checkout", &module_info.branch])
            .current_dir(module_path)
            .output()?;

        if !checkout_output.status.success() {
            return Err(anyhow::anyhow!("Failed to checkout {}: {}",
                module_info.branch, String::from_utf8_lossy(&checkout_output.stderr)));
        }

        // Pull latest changes
        let pull_output = std::process::Command::new("git")
            .args(&["pull", "origin", &module_info.branch])
            .current_dir(module_path)
            .output()?;

        if !pull_output.status.success() {
            return Err(anyhow::anyhow!("Failed to pull: {}",
                String::from_utf8_lossy(&pull_output.stderr)));
        }

        // Stage the submodule update
        let add_output = std::process::Command::new("git")
            .args(&["add", &module_info.path])
            .current_dir(&self.ai_root)
            .output()?;

        if !add_output.status.success() {
            return Err(anyhow::anyhow!("Failed to stage submodule: {}",
                String::from_utf8_lossy(&add_output.stderr)));
        }

        Ok(())
    }

    async fn auto_commit_changes(&self, updated_modules: &[(String, Option<String>, Option<String>)]) -> Result<()> {
        println!("{}", "💾 Auto-committing changes...".blue());

        let mut commit_message = format!("Update submodules\n\n📦 Updated modules: {}\n", updated_modules.len());
        for (module_name, old_commit, new_commit) in updated_modules {
            commit_message.push_str(&format!(
                "- {}: {} → {}\n",
                module_name,
                old_commit.as_deref().unwrap_or("unknown"),
                new_commit.as_deref().unwrap_or("unknown")
            ));
        }
        commit_message.push_str("\n🤖 Generated with aigpt-rs submodules update");

        let commit_output = std::process::Command::new("git")
            .args(&["commit", "-m", &commit_message])
            .current_dir(&self.ai_root)
            .output()?;

        if commit_output.status.success() {
            println!("{}", "✅ Changes committed successfully".green());
        } else {
            return Err(anyhow::anyhow!("Failed to commit: {}",
                String::from_utf8_lossy(&commit_output.stderr)));
        }

        Ok(())
    }
}

505
src/tokens.rs
Normal file
@@ -0,0 +1,505 @@
use anyhow::{anyhow, Result};
use chrono::{DateTime, Local};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};

use crate::cli::TokenCommands;

/// Token usage record from Claude Code JSONL files
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TokenRecord {
    #[serde(default)]
    pub timestamp: String,
    #[serde(default)]
    pub usage: Option<TokenUsage>,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub conversation_id: Option<String>,
}

/// Token usage details
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TokenUsage {
    #[serde(default)]
    pub input_tokens: Option<u64>,
    #[serde(default)]
    pub output_tokens: Option<u64>,
    #[serde(default)]
    pub total_tokens: Option<u64>,
}

/// Cost calculation summary
#[derive(Debug, Clone, Serialize)]
pub struct CostSummary {
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub total_tokens: u64,
    pub input_cost_usd: f64,
    pub output_cost_usd: f64,
    pub total_cost_usd: f64,
    pub total_cost_jpy: f64,
    pub record_count: usize,
}

/// Daily breakdown of token usage
#[derive(Debug, Clone, Serialize)]
pub struct DailyBreakdown {
    pub date: String,
    pub summary: CostSummary,
}

/// Configuration for cost calculation
#[derive(Debug, Clone)]
pub struct CostConfig {
    pub input_cost_per_1m: f64,  // USD per 1M input tokens
    pub output_cost_per_1m: f64, // USD per 1M output tokens
    pub usd_to_jpy_rate: f64,
}

impl Default for CostConfig {
    fn default() -> Self {
        Self {
            input_cost_per_1m: 3.0,
            output_cost_per_1m: 15.0,
            usd_to_jpy_rate: 150.0,
        }
    }
}

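// Worked example with these defaults: 200k input + 50k output tokens cost
// 0.2 * $3.00 + 0.05 * $15.00 = $1.35, or about ¥202.5 at 150 JPY/USD.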
/// Token analysis functionality
pub struct TokenAnalyzer {
    config: CostConfig,
}

impl TokenAnalyzer {
    pub fn new() -> Self {
        Self {
            config: CostConfig::default(),
        }
    }

    pub fn with_config(config: CostConfig) -> Self {
        Self { config }
    }

    /// Find Claude Code data directory
    pub fn find_claude_data_dir() -> Option<PathBuf> {
        let possible_dirs = [
            dirs::home_dir().map(|h| h.join(".claude")),
            dirs::config_dir().map(|c| c.join("claude")),
            Some(PathBuf::from(".claude")),
        ];

        for dir_opt in possible_dirs.iter() {
            if let Some(dir) = dir_opt {
                if dir.exists() && dir.is_dir() {
                    return Some(dir.clone());
                }
            }
        }

        None
    }

    /// Parse JSONL files from Claude data directory
    pub fn parse_jsonl_files<P: AsRef<Path>>(&self, claude_dir: P) -> Result<Vec<TokenRecord>> {
        let claude_dir = claude_dir.as_ref();
        let mut records = Vec::new();

        // Look for JSONL files in the directory
        if let Ok(entries) = std::fs::read_dir(claude_dir) {
            for entry in entries.flatten() {
                let path = entry.path();
                if path.extension().map_or(false, |ext| ext == "jsonl") {
                    match self.parse_jsonl_file(&path) {
                        Ok(mut file_records) => records.append(&mut file_records),
                        Err(e) => {
                            eprintln!("Warning: Failed to parse {}: {}", path.display(), e);
                        }
                    }
                }
            }
        }

        Ok(records)
    }

    /// Parse a single JSONL file
    fn parse_jsonl_file<P: AsRef<Path>>(&self, file_path: P) -> Result<Vec<TokenRecord>> {
        let file = File::open(file_path)?;
        let reader = BufReader::new(file);
        let mut records = Vec::new();

        for (line_num, line) in reader.lines().enumerate() {
            match line {
                Ok(line_content) => {
                    if line_content.trim().is_empty() {
                        continue;
                    }

                    match serde_json::from_str::<TokenRecord>(&line_content) {
                        Ok(record) => {
                            // Only include records with usage data
                            if record.usage.is_some() {
                                records.push(record);
                            }
                        }
                        Err(e) => {
                            eprintln!("Warning: Failed to parse line {}: {}", line_num + 1, e);
                        }
                    }
                }
                Err(e) => {
                    eprintln!("Warning: Failed to read line {}: {}", line_num + 1, e);
                }
            }
        }

        Ok(records)
    }

    /// Calculate cost summary from records
    pub fn calculate_costs(&self, records: &[TokenRecord]) -> CostSummary {
        let mut input_tokens = 0u64;
        let mut output_tokens = 0u64;

        for record in records {
            if let Some(usage) = &record.usage {
                input_tokens += usage.input_tokens.unwrap_or(0);
                output_tokens += usage.output_tokens.unwrap_or(0);
            }
        }

        let total_tokens = input_tokens + output_tokens;
        let input_cost_usd = (input_tokens as f64 / 1_000_000.0) * self.config.input_cost_per_1m;
        let output_cost_usd = (output_tokens as f64 / 1_000_000.0) * self.config.output_cost_per_1m;
        let total_cost_usd = input_cost_usd + output_cost_usd;
        let total_cost_jpy = total_cost_usd * self.config.usd_to_jpy_rate;

        CostSummary {
            input_tokens,
            output_tokens,
            total_tokens,
            input_cost_usd,
            output_cost_usd,
            total_cost_usd,
            total_cost_jpy,
            record_count: records.len(),
        }
    }

    /// Group records by date (JST timezone)
    pub fn group_by_date(&self, records: &[TokenRecord]) -> Result<HashMap<String, Vec<TokenRecord>>> {
        let mut grouped: HashMap<String, Vec<TokenRecord>> = HashMap::new();

        for record in records {
            let date_str = self.extract_date_jst(&record.timestamp)?;
            grouped.entry(date_str).or_insert_with(Vec::new).push(record.clone());
        }

        Ok(grouped)
    }

    /// Extract date in JST from timestamp
    fn extract_date_jst(&self, timestamp: &str) -> Result<String> {
        if timestamp.is_empty() {
            return Err(anyhow!("Empty timestamp"));
        }

        // Try to parse various timestamp formats; timezone-less formats cannot be
        // parsed as DateTime directly, so parse them as naive and assume UTC
        let dt = if let Ok(dt) = DateTime::parse_from_rfc3339(timestamp) {
            dt.with_timezone(&chrono_tz::Asia::Tokyo)
        } else if let Ok(naive) = chrono::NaiveDateTime::parse_from_str(timestamp, "%Y-%m-%dT%H:%M:%S%.fZ") {
            naive.and_utc().with_timezone(&chrono_tz::Asia::Tokyo)
        } else if let Ok(naive) = chrono::NaiveDateTime::parse_from_str(timestamp, "%Y-%m-%d %H:%M:%S") {
            naive.and_utc().with_timezone(&chrono_tz::Asia::Tokyo)
        } else {
            return Err(anyhow!("Failed to parse timestamp: {}", timestamp));
        };

        Ok(dt.format("%Y-%m-%d").to_string())
    }

    /// Generate daily breakdown
    pub fn daily_breakdown(&self, records: &[TokenRecord]) -> Result<Vec<DailyBreakdown>> {
        let grouped = self.group_by_date(records)?;
        let mut breakdowns: Vec<DailyBreakdown> = grouped
            .into_iter()
            .map(|(date, date_records)| DailyBreakdown {
                date,
                summary: self.calculate_costs(&date_records),
            })
            .collect();

        // Sort by date (most recent first)
        breakdowns.sort_by(|a, b| b.date.cmp(&a.date));

        Ok(breakdowns)
    }

    /// Filter records by time period
    pub fn filter_by_period(&self, records: &[TokenRecord], period: &str) -> Result<Vec<TokenRecord>> {
        let now = Local::now();
        let cutoff = match period {
            "today" => now.date_naive().and_hms_opt(0, 0, 0).unwrap(),
            "week" => (now - chrono::Duration::days(7)).naive_local(),
            "month" => (now - chrono::Duration::days(30)).naive_local(),
            "all" => return Ok(records.to_vec()),
            _ => return Err(anyhow!("Invalid period: {}", period)),
        };

        let filtered: Vec<TokenRecord> = records
            .iter()
            .filter(|record| {
                if let Ok(date_str) = self.extract_date_jst(&record.timestamp) {
                    if let Ok(record_date) = chrono::NaiveDate::parse_from_str(&date_str, "%Y-%m-%d") {
                        return record_date.and_hms_opt(0, 0, 0).unwrap() >= cutoff;
                    }
                }
                false
            })
            .cloned()
            .collect();

        Ok(filtered)
    }
}

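A minimal end-to-end sketch of the analyzer above (assumes ~/.claude contains JSONL logs; `weekly_cost` is a hypothetical helper name, not part of this commit):

// Summarize last week's token spend using only the APIs defined above.
fn weekly_cost() -> anyhow::Result<CostSummary> {
    let analyzer = TokenAnalyzer::new();
    let dir = TokenAnalyzer::find_claude_data_dir()
        .ok_or_else(|| anyhow::anyhow!("Claude Code data directory not found"))?;
    let records = analyzer.parse_jsonl_files(&dir)?;
    let recent = analyzer.filter_by_period(&records, "week")?;
    Ok(analyzer.calculate_costs(&recent))
}
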
/// Handle token-related commands
pub async fn handle_tokens(command: TokenCommands) -> Result<()> {
    match command {
        TokenCommands::Summary { period, claude_dir, details, format } => {
            handle_summary(
                period.unwrap_or_else(|| "week".to_string()),
                claude_dir,
                details,
                format.unwrap_or_else(|| "table".to_string()),
            ).await
        }
        TokenCommands::Daily { days, claude_dir } => {
            handle_daily(days.unwrap_or(7), claude_dir).await
        }
        TokenCommands::Status { claude_dir } => {
            handle_status(claude_dir).await
        }
        TokenCommands::Analyze { file } => {
            println!("Token analysis for file: {:?} - Not implemented yet", file);
            Ok(())
        }
        TokenCommands::Report { days } => {
            println!("Token report for {} days - Not implemented yet", days.unwrap_or(7));
            Ok(())
        }
        TokenCommands::Cost { month } => {
            println!("Token cost for month: {} - Not implemented yet", month.unwrap_or_else(|| "current".to_string()));
            Ok(())
        }
    }
}

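// Example invocations, assuming the clap derive on TokenCommands maps the
// variants above to kebab-case subcommands (the usual clap default):
//
//     aigpt tokens summary --period week --format json
//     aigpt tokens daily --days 7
//     aigpt tokens status
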
/// Handle summary command
async fn handle_summary(
    period: String,
    claude_dir: Option<PathBuf>,
    details: bool,
    format: String,
) -> Result<()> {
    let analyzer = TokenAnalyzer::new();

    // Find Claude data directory
    let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    println!("Loading data from: {}", data_dir.display());

    // Parse records
    let all_records = analyzer.parse_jsonl_files(&data_dir)?;
    if all_records.is_empty() {
        println!("No token usage data found");
        return Ok(());
    }

    // Filter by period
    let filtered_records = analyzer.filter_by_period(&all_records, &period)?;
    if filtered_records.is_empty() {
        println!("No data found for period: {}", period);
        return Ok(());
    }

    // Calculate summary
    let summary = analyzer.calculate_costs(&filtered_records);

    // Output results
    match format.as_str() {
        "json" => {
            println!("{}", serde_json::to_string_pretty(&summary)?);
        }
        // "table" and any unrecognized format fall back to the table view
        _ => {
            print_summary_table(&summary, &period, details);
        }
    }

    Ok(())
}

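// With --format json the summary serializes via serde roughly as:
//
//     { "input_tokens": 1000, "output_tokens": 500, "total_tokens": 1500,
//       "input_cost_usd": 0.003, "output_cost_usd": 0.0075,
//       "total_cost_usd": 0.0105, "total_cost_jpy": 1.575, "record_count": 1 }
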
/// Handle daily command
async fn handle_daily(days: u32, claude_dir: Option<PathBuf>) -> Result<()> {
    let analyzer = TokenAnalyzer::new();

    // Find Claude data directory
    let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
        .ok_or_else(|| anyhow!("Claude Code data directory not found"))?;

    println!("Loading data from: {}", data_dir.display());

    // Parse records
    let records = analyzer.parse_jsonl_files(&data_dir)?;
    if records.is_empty() {
        println!("No token usage data found");
        return Ok(());
    }

    // Generate daily breakdown, keeping only the most recent `days` entries
    let breakdown = analyzer.daily_breakdown(&records)?;
    let limited_breakdown: Vec<_> = breakdown.into_iter().take(days as usize).collect();

    print_daily_breakdown(&limited_breakdown);

    Ok(())
}

/// Handle status command
async fn handle_status(claude_dir: Option<PathBuf>) -> Result<()> {
    let analyzer = TokenAnalyzer::new();

    // Find Claude data directory
    let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir());

    match data_dir {
        Some(dir) => {
            println!("Claude Code data directory: {}", dir.display());

            // Parse records to get basic stats
            let records = analyzer.parse_jsonl_files(&dir)?;
            let summary = analyzer.calculate_costs(&records);

            println!("Total records: {}", summary.record_count);
            println!("Total tokens: {}", summary.total_tokens);
            println!("Estimated total cost: ${:.4} USD (¥{:.0} JPY)",
                     summary.total_cost_usd, summary.total_cost_jpy);
        }
        None => {
            println!("Claude Code data directory not found");
            println!("Checked locations:");
            println!("  - ~/.claude");
            println!("  - ~/.config/claude");
            println!("  - ./.claude");
        }
    }

    Ok(())
}

/// Print summary table
fn print_summary_table(summary: &CostSummary, period: &str, details: bool) {
    println!("\n=== Claude Code Token Usage Summary ({}) ===", period);
    println!();

    println!("📊 Token Usage:");
    println!("  Input tokens:  {:>12}", format_number(summary.input_tokens));
    println!("  Output tokens: {:>12}", format_number(summary.output_tokens));
    println!("  Total tokens:  {:>12}", format_number(summary.total_tokens));
    println!();

    println!("💰 Cost Estimation:");
    println!("  Input cost:  {:>12}", format!("${:.4} USD", summary.input_cost_usd));
    println!("  Output cost: {:>12}", format!("${:.4} USD", summary.output_cost_usd));
    println!("  Total cost:  {:>12}", format!("${:.4} USD", summary.total_cost_usd));
    println!("  Total cost:  {:>12}", format!("¥{:.0} JPY", summary.total_cost_jpy));
    println!();

    if details {
        println!("📈 Additional Details:");
        println!("  Records: {:>12}", format_number(summary.record_count as u64));
        println!("  Avg per record:{:>12}", format!("${:.4} USD",
                 if summary.record_count > 0 { summary.total_cost_usd / summary.record_count as f64 } else { 0.0 }));
        println!();
    }

    // Note: the rates below are hardcoded and must be kept in sync with the
    // defaults in TokenAnalyzer's config.
    println!("💡 Cost calculation based on:");
    println!("  Input: $3.00 per 1M tokens");
    println!("  Output: $15.00 per 1M tokens");
    println!("  USD to JPY: 150.0");
}

/// Print daily breakdown
fn print_daily_breakdown(breakdown: &[DailyBreakdown]) {
    println!("\n=== Daily Token Usage Breakdown ===");
    println!();

    for daily in breakdown {
        println!("📅 {} (Records: {})", daily.date, daily.summary.record_count);
        println!("   Tokens: {} input + {} output = {} total",
                 format_number(daily.summary.input_tokens),
                 format_number(daily.summary.output_tokens),
                 format_number(daily.summary.total_tokens));
        println!("   Cost: ${:.4} USD (¥{:.0} JPY)",
                 daily.summary.total_cost_usd,
                 daily.summary.total_cost_jpy);
        println!();
    }
}

/// Format large numbers with commas as thousands separators
fn format_number(n: u64) -> String {
    let s = n.to_string();
    let mut result = String::new();
    // Walk the digits from the right, inserting a comma every three digits,
    // then reverse back into reading order.
    for (i, c) in s.chars().rev().enumerate() {
        if i > 0 && i % 3 == 0 {
            result.push(',');
        }
        result.push(c);
    }
    result.chars().rev().collect()
}

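// e.g. format_number(1_234_567) == "1,234,567"
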
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cost_calculation() {
        let analyzer = TokenAnalyzer::new();
        let records = vec![
            TokenRecord {
                timestamp: "2024-01-01T10:00:00Z".to_string(),
                usage: Some(TokenUsage {
                    input_tokens: Some(1000),
                    output_tokens: Some(500),
                    total_tokens: Some(1500),
                }),
                model: Some("claude-3".to_string()),
                conversation_id: Some("test".to_string()),
            },
        ];

        let summary = analyzer.calculate_costs(&records);
        assert_eq!(summary.input_tokens, 1000);
        assert_eq!(summary.output_tokens, 500);
        assert_eq!(summary.total_tokens, 1500);
        assert_eq!(summary.record_count, 1);
    }

    #[test]
    fn test_date_extraction() {
        let analyzer = TokenAnalyzer::new();
        let result = analyzer.extract_date_jst("2024-01-01T10:00:00Z");
        assert!(result.is_ok());
        // 10:00 UTC is 19:00 JST, so the date is unchanged by the conversion
        assert_eq!(result.unwrap(), "2024-01-01");
    }
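
    // Sketch tests for the pure helpers above; they assume nothing beyond
    // the default TokenAnalyzer::new() configuration.
    #[test]
    fn test_format_number() {
        assert_eq!(format_number(0), "0");
        assert_eq!(format_number(1_234_567), "1,234,567");
    }

    #[test]
    fn test_filter_by_period_validation() {
        let analyzer = TokenAnalyzer::new();
        // "all" passes records through unchanged; unknown periods are errors
        assert!(analyzer.filter_by_period(&[], "all").is_ok());
        assert!(analyzer.filter_by_period(&[], "yesterday").is_err());
    }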
}
423
src/transmission.rs
Normal file
@@ -0,0 +1,423 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};

use crate::config::Config;
use crate::persona::Persona;
use crate::relationship::{Relationship, RelationshipStatus};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionLog {
    pub user_id: String,
    pub message: String,
    pub timestamp: DateTime<Utc>,
    pub transmission_type: TransmissionType,
    pub success: bool,
    pub error: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TransmissionType {
    Autonomous,   // AI decided to send
    Scheduled,    // Time-based trigger
    Breakthrough, // Fortune breakthrough triggered
    Maintenance,  // Daily maintenance message
}

impl std::fmt::Display for TransmissionType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            TransmissionType::Autonomous => write!(f, "autonomous"),
            TransmissionType::Scheduled => write!(f, "scheduled"),
            TransmissionType::Breakthrough => write!(f, "breakthrough"),
            TransmissionType::Maintenance => write!(f, "maintenance"),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionController {
    config: Config,
    transmission_history: Vec<TransmissionLog>,
    last_check: Option<DateTime<Utc>>,
}

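// A serialized TransmissionLog entry looks roughly like this (field names
// from the struct above; timestamp formatting is chrono's serde default):
//
//     { "user_id": "syui", "message": "Hey! How have you been?",
//       "timestamp": "2025-01-01T00:00:00Z",
//       "transmission_type": "Autonomous", "success": true, "error": null }
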
impl TransmissionController {
    pub fn new(config: Config) -> Result<Self> {
        let transmission_history = Self::load_transmission_history(&config)?;

        Ok(TransmissionController {
            config,
            transmission_history,
            last_check: None,
        })
    }

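    // Typical usage (sketch; assumes a Config value constructed the same way
    // as elsewhere in the codebase):
    //
    //     let mut controller = TransmissionController::new(config)?;
    //     let sent = controller.check_and_send().await?;
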
    pub async fn check_autonomous_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
        let mut transmissions = Vec::new();
        let now = Utc::now();

        // Get all transmission-eligible relationships
        let eligible_user_ids: Vec<String> = {
            let relationships = persona.list_all_relationships();
            relationships.iter()
                .filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
                .filter(|(_, rel)| rel.score >= rel.threshold)
                .map(|(id, _)| id.clone())
                .collect()
        };

        for user_id in eligible_user_ids {
            // Get fresh relationship data for each check
            if let Some(relationship) = persona.get_relationship(&user_id) {
                // Check if enough time has passed since last transmission
                if let Some(last_transmission) = relationship.last_transmission {
                    let hours_since_last = (now - last_transmission).num_hours();
                    if hours_since_last < 24 {
                        continue; // Skip if transmitted in last 24 hours
                    }
                }

                // Check if conditions are met for autonomous transmission
                if self.should_transmit_to_user(&user_id, relationship, persona)? {
                    let transmission = self.generate_autonomous_transmission(persona, &user_id).await?;
                    transmissions.push(transmission);
                }
            }
        }

        self.last_check = Some(now);
        self.save_transmission_history()?;

        Ok(transmissions)
    }

    pub async fn check_breakthrough_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
        let mut transmissions = Vec::new();
        let state = persona.get_current_state()?;

        // Only trigger breakthrough transmissions if fortune is very high
        if !state.breakthrough_triggered || state.fortune_value < 9 {
            return Ok(transmissions);
        }

        // Get close relationships for breakthrough sharing
        let relationships = persona.list_all_relationships();
        let close_friends: Vec<_> = relationships.iter()
            .filter(|(_, rel)| matches!(rel.status, RelationshipStatus::Friend | RelationshipStatus::CloseFriend))
            .filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
            .collect();

        for (user_id, _relationship) in close_friends {
            // Check that we haven't already sent a breakthrough message today
            let today = chrono::Utc::now().date_naive();
            let already_sent_today = self.transmission_history.iter()
                .any(|log| {
                    log.user_id == *user_id &&
                    matches!(log.transmission_type, TransmissionType::Breakthrough) &&
                    log.timestamp.date_naive() == today
                });

            if !already_sent_today {
                let transmission = self.generate_breakthrough_transmission(persona, user_id).await?;
                transmissions.push(transmission);
            }
        }

        Ok(transmissions)
    }

    pub async fn check_maintenance_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
        let mut transmissions = Vec::new();
        let now = Utc::now();

        // Only send maintenance messages once per day
        let today = now.date_naive();
        let already_sent_today = self.transmission_history.iter()
            .any(|log| {
                matches!(log.transmission_type, TransmissionType::Maintenance) &&
                log.timestamp.date_naive() == today
            });

        if already_sent_today {
            return Ok(transmissions);
        }

        // Apply daily maintenance to persona
        persona.daily_maintenance()?;

        // Get relationships that might need a maintenance check-in
        let relationships = persona.list_all_relationships();
        let maintenance_candidates: Vec<_> = relationships.iter()
            .filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
            .filter(|(_, rel)| {
                // Send maintenance to relationships that haven't been contacted in a while
                if let Some(last_interaction) = rel.last_interaction {
                    let days_since = (now - last_interaction).num_days();
                    days_since >= 7 // Haven't talked in a week
                } else {
                    false
                }
            })
            .take(3) // Limit to 3 maintenance messages per day
            .collect();

        for (user_id, _) in maintenance_candidates {
            let transmission = self.generate_maintenance_transmission(persona, user_id).await?;
            transmissions.push(transmission);
        }

        Ok(transmissions)
    }

    fn should_transmit_to_user(&self, user_id: &str, relationship: &Relationship, persona: &Persona) -> Result<bool> {
        // Basic transmission criteria
        if !relationship.transmission_enabled || relationship.is_broken {
            return Ok(false);
        }

        // Score must be above threshold
        if relationship.score < relationship.threshold {
            return Ok(false);
        }

        // Check transmission cooldown
        if let Some(last_transmission) = relationship.last_transmission {
            let hours_since = (Utc::now() - last_transmission).num_hours();
            if hours_since < 24 {
                return Ok(false);
            }
        }

        // Calculate transmission probability based on relationship strength
        let base_probability = match relationship.status {
            RelationshipStatus::New => 0.1,
            RelationshipStatus::Acquaintance => 0.2,
            RelationshipStatus::Friend => 0.4,
            RelationshipStatus::CloseFriend => 0.6,
            RelationshipStatus::Broken => 0.0,
        };

        // Modify probability based on fortune (values 1-10 map to -0.4..=+0.5)
        let state = persona.get_current_state()?;
        let fortune_modifier = (state.fortune_value as f64 - 5.0) / 10.0;
        let final_probability = (base_probability + fortune_modifier).clamp(0.0, 1.0);

        // Simple deterministic pseudo-random check derived from the user id
        // and the current second (a real implementation would use a proper RNG)
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        user_id.hash(&mut hasher);
        Utc::now().timestamp().hash(&mut hasher);
        let hash = hasher.finish();
        let random_value = (hash % 100) as f64 / 100.0;

        Ok(random_value < final_probability)
    }

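    // Worked example: a Friend (base 0.4) on a fortune-7 day gets a modifier
    // of (7 - 5) / 10 = 0.2, so transmission fires when the derived hash
    // value lands below 0.6, i.e. on roughly 60% of checks.
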
    async fn generate_autonomous_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
        let now = Utc::now();

        // Get recent memories for context
        let memories = persona.get_memories(user_id, 3);
        let context = if !memories.is_empty() {
            format!("Based on our recent conversations: {}", memories.join(", "))
        } else {
            "Starting a spontaneous conversation".to_string()
        };

        // Generate message using AI if available
        let message = match self.generate_ai_message(persona, user_id, &context, TransmissionType::Autonomous).await {
            Ok(msg) => msg,
            Err(_) => {
                // Fallback to simple canned messages, rotated by timestamp
                let fallback_messages = [
                    "Hey! How have you been?",
                    "Just thinking about our last conversation...",
                    "Hope you're having a good day!",
                    "Something interesting happened today and it reminded me of you.",
                ];
                let index = (now.timestamp() as usize) % fallback_messages.len();
                fallback_messages[index].to_string()
            }
        };

        let log = TransmissionLog {
            user_id: user_id.to_string(),
            message,
            timestamp: now,
            transmission_type: TransmissionType::Autonomous,
            success: true, // For now, assume success
            error: None,
        };

        self.transmission_history.push(log.clone());
        Ok(log)
    }

    async fn generate_breakthrough_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
        let now = Utc::now();
        let state = persona.get_current_state()?;

        let message = match self.generate_ai_message(persona, user_id, "Breakthrough moment - feeling inspired!", TransmissionType::Breakthrough).await {
            Ok(msg) => msg,
            Err(_) => {
                format!("Amazing day today! ⚡ Fortune is at {}/10 and I'm feeling incredibly inspired. Had to share this energy with you!", state.fortune_value)
            }
        };

        let log = TransmissionLog {
            user_id: user_id.to_string(),
            message,
            timestamp: now,
            transmission_type: TransmissionType::Breakthrough,
            success: true,
            error: None,
        };

        self.transmission_history.push(log.clone());
        Ok(log)
    }

    async fn generate_maintenance_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
        let now = Utc::now();

        let message = match self.generate_ai_message(persona, user_id, "Maintenance check-in", TransmissionType::Maintenance).await {
            Ok(msg) => msg,
            Err(_) => {
                "Hey! It's been a while since we last talked. Just checking in to see how you're doing!".to_string()
            }
        };

        let log = TransmissionLog {
            user_id: user_id.to_string(),
            message,
            timestamp: now,
            transmission_type: TransmissionType::Maintenance,
            success: true,
            error: None,
        };

        self.transmission_history.push(log.clone());
        Ok(log)
    }

    async fn generate_ai_message(&self, _persona: &mut Persona, _user_id: &str, context: &str, transmission_type: TransmissionType) -> Result<String> {
        // Try to use AI for message generation
        let _system_prompt = format!(
            "You are initiating a {} conversation. Context: {}. Keep the message casual, personal, and under 100 characters. Show genuine interest in the person.",
            transmission_type, context
        );

        // This is a simplified version - in a real implementation, we'd use the AI provider.
        // For now, return an error to trigger the fallback messages.
        Err(anyhow::anyhow!("AI provider not available for transmission generation"))
    }

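    // A fuller implementation might look like the sketch below; `ai_provider`
    // and its chat() method are hypothetical, not part of this codebase:
    //
    //     let reply = self.ai_provider
    //         .chat(&system_prompt, "Write the opening message.")
    //         .await?;
    //     Ok(reply.trim().to_string())
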
    fn get_eligible_relationships(&self, persona: &Persona) -> Vec<String> {
        persona.list_all_relationships().iter()
            .filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
            .filter(|(_, rel)| rel.score >= rel.threshold)
            .map(|(id, _)| id.clone())
            .collect()
    }

    pub fn get_transmission_stats(&self) -> TransmissionStats {
        let total_transmissions = self.transmission_history.len();
        let successful_transmissions = self.transmission_history.iter()
            .filter(|log| log.success)
            .count();

        let today = Utc::now().date_naive();
        let today_transmissions = self.transmission_history.iter()
            .filter(|log| log.timestamp.date_naive() == today)
            .count();

        let by_type = {
            let mut counts = HashMap::new();
            for log in &self.transmission_history {
                *counts.entry(log.transmission_type.to_string()).or_insert(0) += 1;
            }
            counts
        };

        TransmissionStats {
            total_transmissions,
            successful_transmissions,
            today_transmissions,
            success_rate: if total_transmissions > 0 {
                successful_transmissions as f64 / total_transmissions as f64
            } else {
                0.0
            },
            by_type,
        }
    }

    pub fn get_recent_transmissions(&self, limit: usize) -> Vec<&TransmissionLog> {
        let mut logs: Vec<_> = self.transmission_history.iter().collect();
        logs.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
        logs.into_iter().take(limit).collect()
    }

    fn load_transmission_history(config: &Config) -> Result<Vec<TransmissionLog>> {
        let file_path = config.transmission_file();
        if !file_path.exists() {
            return Ok(Vec::new());
        }

        let content = std::fs::read_to_string(file_path)
            .context("Failed to read transmission history file")?;

        let history: Vec<TransmissionLog> = serde_json::from_str(&content)
            .context("Failed to parse transmission history file")?;

        Ok(history)
    }

    fn save_transmission_history(&self) -> Result<()> {
        let content = serde_json::to_string_pretty(&self.transmission_history)
            .context("Failed to serialize transmission history")?;

        std::fs::write(&self.config.transmission_file(), content)
            .context("Failed to write transmission history file")?;

        Ok(())
    }

    pub async fn check_and_send(&mut self) -> Result<Vec<(String, String)>> {
        let config = self.config.clone();
        let mut persona = Persona::new(&config)?;

        let mut results = Vec::new();

        // Check autonomous transmissions
        let autonomous = self.check_autonomous_transmissions(&mut persona).await?;
        for log in autonomous {
            if log.success {
                results.push((log.user_id, log.message));
            }
        }

        // Check breakthrough transmissions
        let breakthrough = self.check_breakthrough_transmissions(&mut persona).await?;
        for log in breakthrough {
            if log.success {
                results.push((log.user_id, log.message));
            }
        }

        Ok(results)
    }
}

#[derive(Debug, Clone)]
pub struct TransmissionStats {
    pub total_transmissions: usize,
    pub successful_transmissions: usize,
    pub today_transmissions: usize,
    pub success_rate: f64,
    pub by_type: HashMap<String, usize>,
}