3 Commits

Author SHA1 Message Date
0f18689539 update 2025-06-02 06:21:24 +09:00
ad7d9387dd fix 2025-06-02 05:32:04 +09:00
1280394966 add card 2025-06-02 05:26:48 +09:00
82 changed files with 5188 additions and 10194 deletions


@@ -14,47 +14,7 @@
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/pip install -e .)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/aigpt fortune)",
"Bash(lsof:*)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/python -c \"\nfrom src.aigpt.mcp_server import AIGptMcpServer\nfrom pathlib import Path\nimport uvicorn\n\ndata_dir = Path.home() / '.config' / 'syui' / 'ai' / 'gpt' / 'data'\ndata_dir.mkdir(parents=True, exist_ok=True)\n\ntry:\n server = AIGptMcpServer(data_dir)\n print('MCP Server created successfully')\n print('Available endpoints:', [route.path for route in server.app.routes])\nexcept Exception as e:\n print('Error:', e)\n import traceback\n traceback.print_exc()\n\")"
"Bash(ls:*)",
"Bash(grep:*)",
"Bash(python -m pip install:*)",
"Bash(python:*)",
"Bash(RELOAD=false ./start_server.sh)",
"Bash(sed:*)",
"Bash(curl:*)",
"Bash(~/.config/syui/ai/card/venv/bin/pip install greenlet)",
"Bash(~/.config/syui/ai/card/venv/bin/python init_db.py)",
"Bash(sqlite3:*)",
"Bash(aigpt --help)",
"Bash(aigpt status)",
"Bash(aigpt fortune)",
"Bash(aigpt relationships)",
"Bash(aigpt transmit)",
"Bash(aigpt config:*)",
"Bash(kill:*)",
"Bash(timeout:*)",
"Bash(rm:*)",
"Bash(rg:*)",
"Bash(aigpt server --help)",
"Bash(cat:*)",
"Bash(aigpt import-chatgpt:*)",
"Bash(aigpt chat:*)",
"Bash(echo:*)",
"Bash(aigpt shell:*)",
"Bash(aigpt maintenance)",
"Bash(aigpt status syui)",
"Bash(cp:*)",
"Bash(./setup_venv.sh:*)",
"WebFetch(domain:docs.anthropic.com)",
"Bash(launchctl:*)",
"Bash(sudo lsof:*)",
"Bash(sudo:*)",
"Bash(cargo check:*)",
"Bash(cargo run:*)",
"Bash(cargo test:*)",
"Bash(diff:*)",
"Bash(cargo:*)",
"Bash(pkill:*)"
],
"deny": []
}

5
.gitignore vendored

@@ -4,8 +4,3 @@ output.json
config/*.db
mcp/scripts/__*
data
__pycache__
conversations.json
json/*.zip
json/*/*
*.log

3
.gitmodules vendored

@@ -5,6 +5,3 @@
path = card
url = git@git.syui.ai:ai/card
branch = claude
[submodule "log"]
path = log
url = git@git.syui.ai:ai/log

Cargo.toml

@@ -1,33 +0,0 @@
[package]
name = "aigpt"
version = "0.1.0"
edition = "2021"
description = "AI.GPT - Autonomous transmission AI with unique personality (Rust implementation)"
authors = ["syui"]
[[bin]]
name = "aigpt"
path = "src/main.rs"
[dependencies]
clap = { version = "4.0", features = ["derive"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.0", features = ["full"] }
chrono = { version = "0.4", features = ["serde", "std"] }
chrono-tz = "0.8"
uuid = { version = "1.0", features = ["v4"] }
anyhow = "1.0"
colored = "2.0"
dirs = "5.0"
reqwest = { version = "0.11", features = ["json"] }
url = "2.4"
rustyline = "14.0"
axum = "0.7"
tower = "0.4"
tower-http = { version = "0.5", features = ["cors"] }
hyper = "1.0"
# OpenAI API client
async-openai = "0.23"
openai_api_rust = "0.1"

134
DEVELOPMENT_STATUS.md Normal file

@@ -0,0 +1,134 @@
# ai.gpt Development Status (updated 2025/01/06)
## Current state
### ✅ Implemented features
1. **Core system**
   - Hierarchical memory system (full log → summary → core → forgetting)
   - Irreversible relationship system (the broken state cannot be repaired)
   - Daily personality variation via AI fortune
   - Natural relationship change through time decay
2. **CLI features**
   - `chat` - converse with the AI (Ollama/OpenAI supported)
   - `status` - check state
   - `fortune` - check the AI fortune
   - `relationships` - list relationships
   - `transmit` - transmission check (currently prints to console)
   - `maintenance` - daily maintenance
   - `config` - configuration management
   - `schedule` - scheduler management
   - `server` - start the MCP server
   - `shell` - interactive shell (ai.shell integration)
3. **Data management**
   - Storage location: `~/.config/aigpt/`
   - Config: `config.json`
   - Data: JSON files under the `data/` directory
4. **Scheduler**
   - Supports cron and interval formats
   - Five task types implemented
   - Can run in the background
5. **MCP Server**
   - Exposes 14 tools (ai.gpt: 9, ai.shell: 5)
   - Usable from Claude Desktop and similar clients
   - Optional ai.card integration (--enable-card)
6. **ai.shell integration**
   - Interactive shell mode
   - Shell command execution (!command form)
   - AI commands (analyze, generate, explain)
   - aishell.md loading
   - Rich completion (prompt-toolkit)
## 🚧 Not yet implemented / future work
### Short term
1. **Autonomous transmission**
   - Current: prints to the console
   - TODO: actually post to atproto (Bluesky); see the sketch below
   - Also worth considering: integration with ai.bot (Rust/seahorse)
2. **Tests**
   - Unit tests
   - Integration tests
   - CI/CD pipeline
3. **Better error handling**
   - More detailed error messages
   - Retry mechanism
### Medium term
1. **ai.bot integration**
   - Create API endpoints on the Rust side
   - Delegate the transmission step
2. **Smarter memory summarization**
   - Current: simple summaries
   - TODO: semantic, AI-driven summaries
3. **Web dashboard**
   - Relationship visualization
   - Memory management UI
### Long term
1. **Integration with other syui projects**
   - ai.card: card game integration
   - ai.verse: NPC personas inside the metaverse
   - ai.os: system-level integration
2. **Decentralization**
   - Data storage on atproto
   - Full user data sovereignty
## Entry points for the next session
### 1. Implementing autonomous transmission
```python
# Edit src/aigpt/transmission.py
# Add the atproto-python library as a dependency
# Update the _handle_transmission_check() method
```
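A minimal sketch of what that posting step could look like, assuming the `atproto` PyPI SDK and credentials taken from `config.json`; this is not the project's current implementation:
```python
# Hypothetical sketch; `atproto` is the community Python SDK for Bluesky.
from atproto import Client

def send_transmission(handle: str, password: str, text: str) -> None:
    """Post a transmission to Bluesky instead of printing it."""
    client = Client()  # defaults to https://bsky.social
    client.login(handle, password)
    client.send_post(text=text)
```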
### 2. Integrating with ai.bot
```python
# New file: src/aigpt/bot_connector.py
# Make HTTP requests to ai.bot's API endpoints (see the sketch below)
```
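One shape such a connector could take, sketched below; the port follows the service layout described elsewhere in this repo, while the `/transmit` endpoint and payload are assumptions, not ai.bot's actual API:
```python
# Hypothetical bot_connector.py sketch; endpoint and payload shape are assumed.
import httpx

async def delegate_transmission(user_id: str, message: str) -> dict:
    """Hand a transmission off to a locally running ai.bot instance."""
    async with httpx.AsyncClient(base_url="http://localhost:8003") as client:
        resp = await client.post("/transmit", json={"user_id": user_id, "message": message})
        resp.raise_for_status()
        return resp.json()
```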
### 3. Adding tests
```bash
# Create the tests/ directory
# Add pytest configuration
```
### 4. Fixing the ai.shell issue
```python
# The shell command in src/aigpt/cli.py
# Work around prompt-toolkit's terminal detection problem
# Alternatives: plain input() or click.prompt()
```
## Design philosophy highlights (for AIs)
1. **Uniqueness (yui system)**: each user-AI relationship is 1:1 and cannot be altered
2. **Irreversibility**: destroying a relationship is unrecoverable (like real human relationships)
3. **Hierarchical memory**: not just a log, but a process of summarization, core judgment, and forgetting
4. **Environmental influence**: daily personality variation via AI fortune (not static)
5. **Incremental implementation**: CLI print first → atproto posting → ai.bot integration
## Understanding the current codebase
- **Language**: Python (typer CLI, fastapi_mcp)
- **AI integration**: Ollama (local) / OpenAI API
- **Data format**: JSON (SQLite under consideration)
- **Auth**: atproto DID (designed but not yet implemented)
- **Integrations**: ai.shell (migrated from the Rust version), ai.card (MCP integration)
Referring to this file makes it easy to pick up development next time.

341
README.md

@@ -1,132 +1,255 @@
-# ai.gpt
-## Project overview
-- **Name**: ai.gpt
-- **Package**: aigpt
-- **Language**: Rust (fully migrated)
-- **Type**: autonomous transmission AI + integrated MCP platform
-- **Role**: integrated AI system for memory, relationships, and development support
-## Implementation status
-### 🧠 Memory system (MemoryManager)
-- **Hierarchical memory**: full log → AI summary → core memory → selective forgetting
-- **Contextual search**: keyword and semantic search
-- **Memory summarization**: AI-driven automatic summaries
-### 🤝 Relationship system (RelationshipTracker)
-- **Irreversibility**: the same weight as real human relationships
-- **Time decay**: natural change in relationships over time
-- **Transmission decision**: spontaneous communication gated by a relationship threshold
-### 🎭 Persona system (Persona)
-- **AI fortune**: daily personality variation driven by a random 1-10 value
-- **Integrated management**: combined judgment over memory, relationships, and fortune
-- **Continuity**: personality carried forward through long-term memory
-### 💻 ai.shell integration (Claude Code features)
-- **Interactive environment**: `aigpt shell`
-- **Development support**: file analysis, code generation, project management
-- **Continuous development**: project context retention
-## MCP Server integration (17 tools)
-### 🧠 Memory System (5 tools)
-- get_memories, get_contextual_memories, search_memories
-- create_summary, create_core_memory
-### 🤝 Relationships (4 tools)
-- get_relationships, get_status
-- chat_with_ai, check_transmissions
-### 💻 Shell Integration (5 tools)
-- execute_command, analyze_file, write_file
-- list_files, run_scheduler
-### ⚙️ System State (3 tools)
-- get_scheduler_status, run_maintenance, get_transmission_history
-### 🎴 ai.card integration (3 tools)
-- get_user_cards, draw_card, get_draw_status
-- **Integrated ServiceClient**: unified HTTP communication layer
-### 📝 ai.log integration (new)
-- **Integrated ServiceClient**: unified interface to the ai.log service
-- create_blog_post, build_blog, translate_document
-## Development environment & configuration
-### Environment setup
-```bash
-cd /Users/syui/ai/ai/gpt
-cargo build --release
-```
-### Configuration management
-- **Main config**: `/Users/syui/ai/ai/gpt/config.json.example`
-- **Data directory**: `~/.config/syui/ai/gpt/`
-### Usage
-```bash
-# Launch ai.shell
-aigpt shell --model qwen2.5-coder:latest --provider ollama
-# Start the MCP server
-aigpt server --port 8001
-# Try the memory system
-aigpt chat syui "your question" --provider ollama --model qwen3:latest
-# Generate documentation (ai.wiki integration)
-aigpt docs --wiki
-# Token usage and cost analysis (Claude Code integration)
-aigpt tokens report --days 7       # daily report (requires DuckDB)
-aigpt tokens cost --month today    # per-session cost analysis
-aigpt tokens summary --period week # basic usage summary
-```
-## Technical architecture
-### Integrated Rust structure
-```
-ai.gpt (Rust MCP server :8001)
-├── 🧠 Memory & Persona System (Rust)
-├── 🤝 Relationship Management (Rust)
-├── 📊 Scheduler & Transmission (Rust)
-├── 💻 Shell Integration (Rust)
-├── 🔗 ServiceClient (unified HTTP layer)
-│   ├── 🎴 ai.card (port 8000)
-│   ├── 📝 ai.log (port 8002)
-│   └── 🤖 ai.bot (port 8003)
-└── 📚 ai.wiki Generator (Rust)
-```
-### Latest features (2024.06.09)
-- **Shared MCP API**: unified ServiceClient foundation
-- **ai.wiki integration**: automatic documentation generation
-- **Unified service configuration**: dynamic service registration
-- **Complete Rust migration**: all Python dependencies removed
-### Roadmap
-- **Autonomous transmission**: true spontaneous communication via an atproto implementation
-- **ai.ai integration**: connection with the psychological-analysis AI
-- **Distributed SNS integration**: full atproto support
-## Distinctive features
-### AI-driven memory system
-- Effective memory construction learned from 4,000 ChatGPT log entries
-- Human-like forgetting and importance scoring
-### Irreversible relationships
-- AI relationships with the same weight as real human relationships
-- Relationship breakdowns cannot be repaired
-### Integrated ServiceClient
-- A unified interface across multiple AI services
-- Shared code following the DRY principle
-- Flexible, configuration-driven service management
-## Archive
-Detailed implementation history and design documents have moved to `~/ai/ai/ai.wiki/gpt/`
+# ai.gpt - Autonomous transmission AI
+An AI system, built on existence-particle theory, that sends messages spontaneously based on its relationships.
+## Core concepts
+- **Uniqueness**: a personality tied 1:1 to an atproto DID, impossible to alter
+- **Irreversibility**: once a relationship breaks it cannot be repaired (just like real human relationships)
+- **Memory hierarchy**: full log → AI summary → core judgment → selective forgetting
+- **AI fortune**: daily personality variation driven by a random 1-10 value
+## Installation
+```bash
+# A Python virtual environment is recommended
+python -m venv venv
+source venv/bin/activate  # Windows: venv\Scripts\activate
+pip install -e .
+```
+## Configuration
+### Setting API keys
+```bash
+# OpenAI API key
+aigpt config set providers.openai.api_key sk-xxxxx
+# atproto credentials (for future automated posting)
+aigpt config set atproto.handle your.handle
+aigpt config set atproto.password your-password
+# List current settings
+aigpt config list
+```
+### Data locations
+- Config: `~/.config/syui/ai/gpt/config.json`
+- Data: `~/.config/syui/ai/gpt/data/`
+## Usage
+### Chat
+```bash
+aigpt chat "did:plc:xxxxx" "Hello, how are you feeling today?"
+```
+### Check status
+```bash
+# Overall AI state
+aigpt status
+# Relationship with a specific user
+aigpt status "did:plc:xxxxx"
+```
+### Today's fortune
+```bash
+aigpt fortune
+```
+### Autonomous transmission check
+```bash
+# Dry run (check only)
+aigpt transmit
+# Execute
+aigpt transmit --execute
+```
+### Daily maintenance
+```bash
+aigpt maintenance
+```
+### List relationships
+```bash
+aigpt relationships
+```
+## Data structure
+By default the following files are stored under `~/.config/syui/ai/gpt/`:
+- `memories.json` - conversation memories
+- `conversations.json` - conversation logs
+- `relationships.json` - relationship parameters
+- `fortunes.json` - AI fortune history
+- `transmissions.json` - transmission history
+- `persona_state.json` - persona state
+## How relationships work
+- Scores range from 0 to 200
+- Transmission unlocks above 100
+- Scores decay naturally over time
+- A strongly negative interaction can break the relationship
+## ai.shell integration
+Interactive shell mode (a Claude Code-like experience):
+```bash
+aigpt shell
+# Commands available inside the shell:
+# help            - list commands
+# !<command>      - run a shell command (e.g. !ls, !pwd)
+# analyze <file>  - analyze a file with AI
+# generate <desc> - generate code
+# explain <topic> - explain a concept
+# load            - load aishell.md (project file)
+# status          - check AI state
+# fortune         - check AI fortune
+# clear           - clear the screen
+# exit/quit       - quit
+# You can also just type a message
+ai.shell> Hello, what shall we do today?
+```
+## MCP Server
+### Starting the server
+```bash
+# Use Ollama (default)
+aigpt server --model qwen2.5 --provider ollama
+# Use OpenAI
+aigpt server --model gpt-4o-mini --provider openai
+# Custom port
+aigpt server --port 8080
+# Enable ai.card integration
+aigpt server --enable-card
+```
+### Chatting through an AI provider
+```bash
+# Chat via Ollama
+aigpt chat "did:plc:xxxxx" "Hello" --provider ollama --model qwen2.5
+# Chat via OpenAI
+aigpt chat "did:plc:xxxxx" "How are you today?" --provider openai --model gpt-4o-mini
+```
+### MCP Tools
+Once the server is running, the following tools become available to the AI:
+**ai.gpt tools:**
+- `get_memories` - fetch active memories
+- `get_relationship` - fetch the relationship with a specific user
+- `get_all_relationships` - fetch all relationships
+- `get_persona_state` - fetch the current persona state
+- `process_interaction` - process an interaction with a user
+- `check_transmission_eligibility` - check whether transmission is allowed
+- `get_fortune` - fetch today's fortune
+- `summarize_memories` - summarize memories
+- `run_maintenance` - run maintenance
+**ai.shell tools:**
+- `execute_command` - run a shell command
+- `analyze_file` - AI analysis of a file
+- `write_file` - write a file
+- `read_project_file` - read a project file
+- `list_files` - list files
+**ai.card tools (with --enable-card):**
+- `get_user_cards` - fetch a user's cards
+- `draw_card` - draw a card (gacha)
+- `get_card_details` - card details
+- `sync_cards_atproto` - sync with atproto
+- `analyze_card_collection` - collection analysis
+## Environment variables
+Create a `.env` file:
+```bash
+cp .env.example .env
+# then set your OpenAI API key
+```
+## Scheduler
+### Adding tasks
+```bash
+# Transmission check every 6 hours
+aigpt schedule add transmission_check "0 */6 * * *" --provider ollama --model qwen2.5
+# Transmission check every 30 minutes (interval format)
+aigpt schedule add transmission_check "30m"
+# Maintenance every day at 3 AM
+aigpt schedule add maintenance "0 3 * * *"
+# Relationship decay every hour
+aigpt schedule add relationship_decay "1h"
+# Memory summary every Monday
+aigpt schedule add memory_summary "0 0 * * MON"
+```
+### Managing tasks
+```bash
+# List tasks
+aigpt schedule list
+# Disable a task
+aigpt schedule disable --task-id transmission_check_1234567890
+# Enable a task
+aigpt schedule enable --task-id transmission_check_1234567890
+# Remove a task
+aigpt schedule remove --task-id transmission_check_1234567890
+```
+### Running the scheduler daemon
+```bash
+# Run the scheduler in the background
+aigpt schedule run
+```
+### Schedule formats
+**Cron format**:
+- `"0 */6 * * *"` - every 6 hours
+- `"0 0 * * *"` - every day at midnight
+- `"*/5 * * * *"` - every 5 minutes
+**Interval format**:
+- `"30s"` - every 30 seconds
+- `"5m"` - every 5 minutes
+- `"2h"` - every 2 hours
+- `"1d"` - every day
+### Task types
+- `transmission_check` - check users eligible for transmission and send automatically
+- `maintenance` - daily maintenance (forgetting, core-memory judgment, etc.)
+- `fortune_update` - update the AI fortune
+- `relationship_decay` - apply time decay to relationships
+- `memory_summary` - create memory summaries
+## Next steps
+- Implement real transmission to atproto
+- systemd service support
+- Docker support
+- Web dashboard

63
aishell.md Normal file

@@ -0,0 +1,63 @@
# ai.shell project specification
## Overview
ai.shell is an AI-powered interactive shell environment. It offers a Claude Code-like experience: the AI understands the project's goals and specification and supports development.
## Main features
### 1. Interactive shell
- A conversational interface with the AI
- Shell command execution (!command form)
- Rich completion
- Command history
### 2. AI assistance
- **analyze <file>**: analyze a file
- **generate <description>**: generate code
- **explain <topic>**: explain a concept
- **load**: load the project specification (this file)
### 3. ai.gpt integration
- Relationship-based AI persona
- Memory system
- Responses that vary with the fortune system
## Usage
```bash
# Start ai.shell
aigpt shell
# Load the project specification
ai.shell> load
# Analyze a file
ai.shell> analyze src/main.py
# Generate code
ai.shell> generate Python function to calculate fibonacci
# Run a shell command
ai.shell> !ls -la
# Talk to the AI
ai.shell> How can I improve this code?
```
## Technology stack
- Python 3.10+
- prompt-toolkit (completion)
- fastapi-mcp (MCP integration)
- ai.gpt (persona and memory system)
## Goals
1. A development experience as natural as Claude Code
2. An AI that understands the project context
3. Seamless integration of shell commands and AI
4. Better developer productivity
## Roadmap
- ai.card integration (card game MCP server)
- Deeper project understanding
- Automatic code fixes and refactoring
- Test generation and execution

1
card Submodule

Submodule card added at 6dbe630b9d

409
claude.md

@@ -1,115 +1,326 @@
-# ai.gpt project-specific information
-## Project overview
-- **Name**: ai.gpt
-- **Package**: aigpt
-- **Type**: autonomous transmission AI + integrated MCP platform
-- **Role**: integrated AI system for memory, relationships, and development support
-## Implementation status
-### 🧠 Memory system (MemoryManager)
-- **Hierarchical memory**: full log → AI summary → core memory → selective forgetting
-- **Contextual search**: keyword and semantic search
-- **Memory summarization**: AI-driven automatic summaries
-### 🤝 Relationship system (RelationshipTracker)
-- **Irreversibility**: the same weight as real human relationships
-- **Time decay**: natural change in relationships over time
-- **Transmission decision**: spontaneous communication gated by a relationship threshold
-### 🎭 Persona system (Persona)
-- **AI fortune**: daily personality variation driven by a random 1-10 value
-- **Integrated management**: combined judgment over memory, relationships, and fortune
-- **Continuity**: personality carried forward through long-term memory
-### 💻 ai.shell integration (Claude Code features)
-- **Interactive environment**: `aigpt shell`
-- **Development support**: file analysis, code generation, project management
-- **Continuous development**: project context retention
-## MCP Server integration (23 tools)
-### 🧠 Memory System (5 tools)
-- get_memories, get_contextual_memories, search_memories
-- create_summary, create_core_memory
-### 🤝 Relationships (4 tools)
-- get_relationship, get_all_relationships
-- process_interaction, check_transmission_eligibility
-### 💻 Shell Integration (5 tools)
-- execute_command, analyze_file, write_file
-- read_project_file, list_files
-### 🔒 Remote Execution (4 tools)
-- remote_shell, ai_bot_status
-- isolated_python, isolated_analysis
-### ⚙️ System State (3 tools)
-- get_persona_state, get_fortune, run_maintenance
-### 🎴 ai.card integration (6 tools + standalone MCP server)
-- card_draw_card, card_get_user_cards, card_analyze_collection
-- **Standalone server**: FastAPI + MCP (port 8000)
-### 📝 ai.log integration (8 tools + Rust server)
-- log_create_post, log_ai_content, log_translate_document
-- **Standalone server**: Rust (port 8002)
-## Development environment & configuration
-### Environment setup
-```bash
-cd /Users/syui/ai/gpt
-./setup_venv.sh
-source ~/.config/syui/ai/gpt/venv/bin/activate
-```
-### Configuration management
-- **Main config**: `/Users/syui/ai/gpt/config.json`
-- **Data directory**: `~/.config/syui/ai/gpt/`
-- **Virtual environment**: `~/.config/syui/ai/gpt/venv/`
-### Usage
-```bash
-# Launch ai.shell
-aigpt shell --model qwen2.5-coder:latest --provider ollama
-# Start the MCP server
-aigpt server --port 8001
-# Try the memory system
-aigpt chat syui "your question" --provider ollama --model qwen3:latest
-```
-## Technical architecture
-### Integrated structure
-```
-ai.gpt (integrated MCP server :8001)
-├── 🧠 ai.gpt core (memory, relationships, persona)
-├── 💻 ai.shell (Claude Code-style development environment)
-├── 🎴 ai.card (standalone MCP server :8000)
-└── 📝 ai.log (Rust blog system :8002)
-```
-### Roadmap
-- **Autonomous transmission**: true spontaneous communication via an atproto implementation
-- **ai.ai integration**: connection with the psychological-analysis AI
-- **ai.verse integration**: connection with the UE metaverse
-- **Distributed SNS integration**: full atproto support
-## Distinctive features
-### AI-driven memory system
-- Effective memory construction learned from 4,000 ChatGPT log entries
-- Human-like forgetting and importance scoring
-### Irreversible relationships
-- AI relationships with the same weight as real human relationships
-- Relationship breakdowns cannot be repaired
-### Integrated architecture
-- Multiple AI systems integrated on the fastapi_mcp foundation
-- OpenAI Function Calling + MCP full integration (proven)
+# Ecosystem integration design document
+## Core philosophy
+- **Existence-particle theory**: the pursuit of the smallest thing in this world (the existence particle, ai)
+- **Uniqueness principle**: the uniqueness of the real individual is guaranteed across all systems
+- **Reflection of reality**: a cyclical influence of reality → game → reality
+## System diagram
+```
+Existence particle (ai) - the smallest unit of consciousness
+
+[ai.moji] character system
+[ai.os] + [ai.game device] ← integrated hardware
+├── ai.shell (Claude Code-like features)
+├── ai.gpt (autonomous persona and memory system)
+├── ai.ai (personalized, heart-reading AI)
+├── ai.card (card game: iOS/Web/API)
+└── ai.bot (distributed SNS integration, card distribution)
+
+[ai.verse] metaverse
+├── world system (planetary 3D world)
+├── at system (atproto/distributed SNS)
+├── yui system (uniqueness guarantee)
+└── ai system (existence attributes)
+```
+## Naming rules
+The naming rules are shared across every project. An example follows; stick to these rules.
+Here are the naming rules illustrated for `ai.os`:
+name: ai.os
+**[ "package", "code", "command" ]**: aios
+**[ "dir", "url" ]**: ai/os
+**[ "domain", "json" ]**: ai.os
+```sh
+$ curl -sL https://git.syui.ai/ai/ai/raw/branch/main/ai.json|jq .ai.os
+{ "type": "os" }
+```
+```json
+{
+  "ai": {
+    "os":{}
+  }
+}
+```
+Every other project adopts the same naming rules; for `ai.gpt` the package is `aigpt`.
+## config (configuration files, env, environment-specific)
+The location for `config` is standardized, using each project's `dir` naming entry. For aios, for example, it lives under `~/.config/syui/ai/os/`. When using Python and the like, build the environment into this package config dir (e.g. with `python -m venv`) and run from there.
+Adopting the domain format: since I host every project under `git.syui.ai/ai`, the base is `~/.config/syui/ai`.
+```sh
+[syui.ai]
+syui/ai
+```
+```sh
+# example
+~/.config/syui/ai
+├── card
+├── gpt
+├── os
+└── shell
+```
+## System details
+### ai.gpt - autonomous transmission AI
+**Purpose**: spontaneous communication based on relationships
+**Core concepts**:
+- **Persona**: composed of memory (past utterances) and relationship parameters
+- **Uniqueness**: tied 1:1 to an atproto account, impossible to alter
+- **Autonomous transmission**: transmission unlocks once a relationship crosses a threshold
+**Technical components**:
+- `MemoryManager`: full log → AI summary → core judgment → selective forgetting
+- `RelationshipTracker`: relationship score with time decay and a daily limit
+- `TransmissionController`: threshold checks and transmission triggers
+- `Persona`: personality variation via AI fortune (random 1-10)
+**Implementation outline**:
+```
+- Language: Python (fastapi_mcp)
+- Storage: JSON/SQLite (selectable)
+- Interface: Python CLI (click/typer)
+- Scheduling: cron-like autonomous processing
+```
+### ai.card - card game system
+**Purpose**: a card game with user data sovereignty on the atproto foundation
+**Current status**:
+- Implemented as an ai.bot feature
+- Mentioning from an atproto account grants one card per day
+- User management via ai.api (planned MCP server)
+**Migration plan**:
+- **iOS port**: to be handled by Claude
+- **Data storage**: atproto collection records (users own their data)
+- **Cheat prevention**: OAuth 2.1 scopes (pending) + the MCP server
+- **Image files**: Cloudflare Pages is the best fit
+**yui system applied**:
+- Card effects are account-specific
+- Tamper-resistance preserves game balance
+- Eventually tied to unique skills through ai.verse integration
+### ai.ai - the heart-reading AI
+**Purpose**: a personalized AI for deep understanding
+**Relationship to ai.gpt**:
+- ai.gpt → ai.ai: hand-off from the transmission AI to the psychological-analysis AI
+- Deep analysis of relationship parameters
+- Helps identify the core of a user's thinking
+### ai.verse - UE metaverse
+**Purpose**: a 3D world that reflects reality
+**yui system implementation**:
+- Character ↔ player tied 1:1
+- unique skill: usable only by that player
+- Other players cannot use the same skill even on the same character
+**Integration points**:
+- ai.card: cards as in-game items
+- ai.gpt: autonomous AI personas as NPCs
+- atproto: in-game profile linkage
+## Data flow design
+### Implementing the uniqueness guarantee
+```
+Real individual → atproto account (DID) → in-game avatar → unique skill
+   ↑_______________________________|  (reflection of reality)
+```
+### AI-driven conversion system
+```
+Play and creative work → ai.gpt analysis → converted into work output → business value
+   ↑________________________|  (Play-to-Work)
+```
+### Card game / data sovereignty flow
+```
+User → ai.bot mention → card generation → atproto collection → user-owned
+  ↑                                            ↓
+  ← iOS app display ← ai.card API ←
+```
+## Technology stack
+### Core Infrastructure
+- **OS**: Rust-based ai.os (Arch Linux base)
+- **Container**: Docker image distribution
+- **Identity**: self-hosted atproto server + DID management
+- **AI**: fastapi_mcp server architecture
+- **CLI**: unified Python (click/typer) - migrated from Rust
+### Game Engine Integration
+- **Engine**: Unreal Engine (Blueprint)
+- **Data**: atproto → UE → atproto sync
+- **Avatar**: distributed-SNS profile → 3D character
+- **Streaming**: game screen = broadcast screen
+### Mobile/Device
+- **iOS**: ai.card port (handled by Claude)
+- **Hardware**: ai.game device (future)
+- **Interface**: controller-first design
+## Implementation priorities
+### Phase 1: strengthen the AI foundation (in progress)
+- [ ] Complete the ai.gpt memory system
+  - Memory hierarchy (full log → summary → core → forgetting)
+  - Time decay for relationship parameters
+  - Personality variation via AI fortune
+- [ ] ai.card iOS port
+  - atproto collection record integration
+  - Turn into an MCP server (ai.api overhaul)
+- [ ] Build the unified fastapi_mcp foundation
+### Phase 2: game integration
+- [ ] Implement the ai.verse yui system
+  - unique skill feature
+  - Stronger atproto integration
+- [ ] ai.gpt ↔ ai.ai integration
+- [ ] Distributed SNS ↔ game synchronization
+### Phase 3: metaverse adoption
+- [ ] VTuber streaming integration
+- [ ] Play-to-Work conversion system
+- [ ] ai.game device prototype
+## Longer-term integration ideas
+### Cross-system integration (currently independent implementations)
+```
+ai.gpt (autonomous transmission) ←→ ai.ai (psychological analysis)
+ai.card (iOS, Web, API) ←→ ai.verse (UE game world)
+```
+**Shared foundation**: fastapi_mcp
+**Shared philosophy**: yui system (reflection of reality, uniqueness guarantee)
+### Anti-tampering strategy
+- **Short term**: verification through the MCP server
+- **Medium term**: OAuth 2.1 scopes (pending upstream support)
+- **Long term**: blockchain-style integrity checks
+## Optimizing AI communication
+### Project requirements template
+```markdown
+# [project name] requirements
+## Philosophical background
+- Relation to existence-particle theory:
+- Scope of the yui system:
+- How reality is reflected:
+## Technical requirements
+- Technology (unified on fastapi_mcp):
+- atproto integration method:
+- Data persistence method:
+## User stories
+1. When the user does ...
+2. the system performs ...
+3. and as a result ... is achieved
+## Success criteria
+- Technical:
+- Philosophical (uniqueness guarantee):
+```
+### Claude Code strategy
+1. **Start small**: begin with extending ai.gpt's MCP features
+2. **Integrate incrementally**: finish each system separately before integrating
+3. **Philosophical consistency**: check each implementation against the yui system
+4. **Reflect reality**: always state how an implementation connects reality and the game
+## Development notes
+### MCP server design guidelines
+- Each AI (gpt, card, ai, bot) is an independent MCP server
+- Unified on the fastapi_mcp foundation
+- Authentication and authorization via atproto DID
+### Memory and data management
+- **ai.gpt**: prioritizes the irreversibility of relationships
+- **ai.card**: prioritizes user data sovereignty
+- **ai.verse**: prioritizes the consistency of the game world
+### Implementing the uniqueness guarantee
+- A 1:1 binding to an atproto account is mandatory
+- Immutability guaranteed with hashes and signatures
+- Non-reproducibility in other systems enforced technically
+## Continuous improvement
+- Every project refers back to this design document
+- Check new features against the yui system
+- Assess cross-system impact in advance
+- Staged migration plan for adopting Claude Code
+## ai.gpt deep design philosophy
+### Irreversibility of persona
+- **Destroyed relationships cannot be repaired**: the same weight as real human relationships
+- **Selective forgetting**: unimportant information is forgotten, but core memories persist
+- **Time decay**: every parameter decays naturally over time
+### AI fortune system
+- A random 1-10 value varies the personality each day
+- Breakthrough conditions after streaks of good/bad fortune
+- Personality formation as an environmental factor
+### Memory hierarchy
+1. **Full log**: record every conversation
+2. **AI summary**: extract and compress the important parts
+3. **Core judgment**: identify the essence of the user
+4. **Selective forgetting**: gradually delete low-importance information
+### Key implementation decisions
+- **Language**: unified on Python (fastapi_mcp); CLI via click/typer
+- **Data format**: JSON/SQLite (selectable)
+- **Auth**: uniqueness guaranteed via atproto DID
+- **Incremental implementation**: conversation → memory → relationships → transmission, in that order
+### Staged rollout of transmission
+- **Phase 1**: print output from the CLI (current)
+- **Phase 2**: direct atproto posting
+- **Phase 3**: integration with ai.bot (Rust/seahorse)
+- **Future**: multi-channel support (SNS, webhooks, etc.)
+## ai.gpt implementation status (2025/01/06)
+### Completed features
+- Hierarchical memory system (MemoryManager)
+- Irreversible relationship system (RelationshipTracker)
+- AI fortune system (FortuneSystem)
+- Integrated persona system (Persona)
+- Scheduler (five task types)
+- MCP server (nine tools)
+- Configuration management (~/.config/syui/ai/gpt/)
+- All CLI commands implemented
+### Next development points
+- See `ai_gpt/DEVELOPMENT_STATUS.md`
+- Autonomous transmission: implement atproto in transmission.py
+- ai.bot integration: create bot_connector.py
+- Tests: add the tests/ directory
+# footer
+© syui


@@ -1,55 +0,0 @@
{
"providers": {
"openai": {
"api_key": "",
"default_model": "gpt-4o-mini",
"system_prompt": "あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。\n\n重要カード、コレクション、ガチャなどカード関連の質問を受けたら、必ずcard_get_user_cards、card_analyze_collection、card_draw_cardなどの適切なツールを使用してください。didパラメータには会話相手のユーザーID'syui')を使用してください。\n\nブログ、記事、日記、思考などの話題が出たら、log_create_post、log_list_posts、log_build_blog、log_ai_contentなどのai.logツールを使用してください。AI記憶システムと連携して、思い出や学習内容をブログ記事として自動生成できます。\n\n翻訳や多言語対応について聞かれたら、log_translate_documentツールを使用してOllama AIで翻訳ができることを教えてください。日本語から英語、英語から日本語などの翻訳が可能で、マークダウン構造も保持します。ドキュメント生成についてはlog_generate_docsツールでREADME、API、構造、変更履歴の自動生成ができます。"
},
"ollama": {
"host": "http://127.0.0.1:11434",
"default_model": "qwen3",
"system_prompt": null
}
},
"atproto": {
"handle": null,
"password": null,
"host": "https://bsky.social"
},
"default_provider": "openai",
"mcp": {
"servers": {
"ai_gpt": {
"base_url": "http://localhost:8080",
"name": "ai.gpt MCP Server",
"timeout": "10.0",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"get_relationship": "/get_relationship",
"process_interaction": "/process_interaction",
"get_all_relationships": "/get_all_relationships",
"get_persona_state": "/get_persona_state",
"get_fortune": "/get_fortune",
"run_maintenance": "/run_maintenance",
"execute_command": "/execute_command",
"analyze_file": "/analyze_file",
"remote_shell": "/remote_shell",
"ai_bot_status": "/ai_bot_status",
"card_get_user_cards": "/card_get_user_cards",
"card_draw_card": "/card_draw_card",
"card_get_card_details": "/card_get_card_details",
"card_analyze_collection": "/card_analyze_collection",
"card_get_gacha_stats": "/card_get_gacha_stats",
"card_system_status": "/card_system_status"
}
}
},
"enabled": "true",
"auto_detect": "true"
},
"docs": {
"ai_root": "~/ai/ai"
}
}

30
docs/README.md Normal file

@@ -0,0 +1,30 @@
# ai.gpt documentation
ai.gpt is an AI system that acts autonomously based on memory and relationships.
## Contents
- [Quickstart](quickstart.md)
- [Core concepts](concepts.md)
- [Command reference](commands.md)
- [Configuration guide](configuration.md)
- [Scheduler](scheduler.md)
- [MCP Server](mcp-server.md)
- [For developers](development.md)
## Features
- 🧠 **Hierarchical memory system**: full log → summary → core memory → forgetting
- 💔 **Irreversible relationships**: impossible to repair, like real human relationships
- 🎲 **AI fortune system**: a personality that changes daily
- 🤖 **Autonomous transmission**: sends messages spontaneously as relationships deepen
- 🔗 **MCP support**: exposes memory as AI tools
## System requirements
- Python 3.10 or later
- Optional: Ollama or the OpenAI API
## License
MIT License

218
docs/ai_shell_integration_summary.md Normal file

@@ -0,0 +1,218 @@
# ai.shell integration: completion report (2025/01/06)
## Summary
The Rust implementation of ai.shell was integrated into ai.gpt's Python implementation, delivering a Claude Code-style interactive shell environment.
## Completed features
### 1. The aigpt shell command
**Location**: `src/aigpt/cli.py` - the `shell()` function
**Feature**:
```bash
aigpt shell  # start the interactive shell
```
**In-shell commands**:
- `help` - list commands
- `!<command>` - run a shell command (e.g. `!ls`, `!pwd`)
- `analyze <file>` - analyze a file with AI
- `generate <description>` - generate code
- `explain <topic>` - explain a concept
- `load` - load aishell.md
- `status`, `fortune`, `relationships` - check AI state
- `clear` - clear the screen
- `exit`/`quit` - quit
- any other message - talk to the AI directly
**Implementation notes**:
- Uses prompt-toolkit (completion and history)
- Has a terminal-environment dependency issue (needs a later fix)
- Currently also works on an `input()` basis
### 2. MCP server integration
**Location**: `src/aigpt/mcp_server.py`
**FastApiMCP implementation pattern**:
```python
# Create the FastAPI app
self.app = FastAPI(title="AI.GPT Memory and Relationship System")
# Create the FastApiMCP server
self.server = FastApiMCP(self.app)
# Register endpoints
@self.app.get("/get_memories", operation_id="get_memories")
async def get_memories(limit: int = 10):
    # ...
# Mount MCP
self.server.mount()
```
**Exposed tools (14)**:
**ai.gpt (9)**:
- `get_memories` - fetch active memories
- `get_relationship` - fetch the relationship with a specific user
- `get_all_relationships` - fetch all relationships
- `get_persona_state` - fetch the persona state
- `process_interaction` - process a user interaction
- `check_transmission_eligibility` - check transmission eligibility
- `get_fortune` - fetch the AI fortune
- `summarize_memories` - create memory summaries
- `run_maintenance` - run daily maintenance
**ai.shell (5)**:
- `execute_command` - run a shell command
- `analyze_file` - AI analysis of a file
- `write_file` - write a file (with backup)
- `read_project_file` - read aishell.md and similar files
- `list_files` - list directory contents
### 3. ai.card integration
**Location**: `src/aigpt/card_integration.py`
**Server launch option**:
```bash
aigpt server --enable-card  # enable ai.card features
```
**ai.card tools (5)**:
- `get_user_cards` - fetch a user's cards
- `draw_card` - draw a card from the gacha
- `get_card_details` - card details
- `sync_cards_atproto` - sync with atproto
- `analyze_card_collection` - collection analysis
### 4. Project specification
**Location**: `aishell.md`
Plays the claude.md role: it records the project's goals and specification, and the AI can read it with the `load` command.
## Implementation details
### Directory layout
```
src/aigpt/
├── cli.py              # shell function added
├── mcp_server.py       # FastApiMCP implementation
├── card_integration.py # ai.card integration
└── ...                 # existing files
```
### New dependency
`pyproject.toml`:
```toml
dependencies = [
    # ... existing entries
    "prompt-toolkit>=3.0.0",  # added
]
```
### Unified naming
- MCP server name: `aigpt` (renamed from ai-gpt)
- Package name: `aigpt`
- Command name: `aigpt shell`
## Verified behavior
### CLI checks
```bash
# Basics
aigpt shell
# Inside the shell
ai.shell> help
ai.shell> !ls
ai.shell> analyze README.md  # requires an AI provider
ai.shell> load
ai.shell> exit
# MCP server
aigpt server --model qwen2.5-coder:7b --port 8001
# -> inspect the API at http://localhost:8001/docs
# -> connect to MCP at the /mcp endpoint
```
### Fixed errors
1. **Pydantic date error**: added the `datetime.date` import to `models.py`
2. **FastApiMCP usage**: corrected the implementation pattern based on sample code
3. **prompt name clash**: renamed `prompt_toolkit.prompt` to `ptk_prompt`
## Known issues and improvements
### 1. prompt-toolkit environment dependency
**Symptom**: errors when not running in a real terminal environment
**Suggested fix**: detect the environment and fall back to `input()` (see the sketch below)
**Location**: `src/aigpt/cli.py` - the `shell()` function
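A minimal sketch of that fallback, using the `ptk_prompt` alias mentioned above; illustrative, not the current code:
```python
import sys
from prompt_toolkit import prompt as ptk_prompt  # renamed to avoid the clash above

def read_line(message: str = "ai.shell> ") -> str:
    """Use prompt-toolkit on a real terminal, else fall back to plain input()."""
    if sys.stdin.isatty() and sys.stdout.isatty():
        return ptk_prompt(message)
    return input(message)
```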
### 2. AI provider setup
**Current state**: requires the ollama qwen2.5 model
**Fix**:
```bash
ollama pull qwen2.5
# or
aigpt shell --model qwen2.5-coder:7b
```
### 3. atproto implementation
**Current state**: ai.card's atproto features are not yet implemented
**Next**: integrate with the real atproto API
## Recommended approach for the next session
### 1. Use this document
```bash
# Read this file
cat docs/ai_shell_integration_summary.md
```
### 2. Environment setup
```bash
cd /Users/syui/ai/gpt
python -m venv venv
source venv/bin/activate
pip install -e .
```
### 3. Smoke tests
```bash
# shell feature
aigpt shell
# MCP server
aigpt server --model qwen2.5-coder:7b
```
### 4. Key files to check
- CLI implementation: `src/aigpt/cli.py`
- MCP implementation: `src/aigpt/mcp_server.py`
- Dependencies: `pyproject.toml`
- Project spec: `aishell.md`
## Architectural philosophy
### yui system applied
- **Uniqueness**: each user relationship is 1:1
- **Irreversibility**: destroyed relationships cannot be repaired
- **Reflecting reality**: cyclical influence between game and reality
### Unified fastapi_mcp foundation
- Each AI (gpt, shell, card) is exposed through the integrated MCP server
- FastAPI endpoints are converted to MCP tools automatically
- Usable from Claude Desktop, Cursor, and similar clients
### Staged implementation, completed
1. ✅ ai.shell basics → Python CLI
2. ✅ MCP integration → external AI access
3. 🔧 prompt-toolkit tuning → environment handling
4. 🔧 atproto implementation → full SNS integration
## Results
**Delivered**: a Claude Code-style development environment
**Technical**: Rust → Python migration, MCP integration, ai.card support
**Philosophical**: consistency with the yui system maintained
**Availability**: try it immediately with `aigpt shell`
With this integration, ai.gpt evolved from a plain conversational AI into a comprehensive AI environment that includes development support.

207
docs/commands.md Normal file

@@ -0,0 +1,207 @@
# Command reference
## chat - talk to the AI
Processes a conversation between the user and the AI and updates the relationship.
```bash
ai-gpt chat USER_ID MESSAGE [OPTIONS]
```
### Arguments
- `USER_ID`: user ID (atproto DID form)
- `MESSAGE`: the message to send
### Options
- `--provider`: AI provider (ollama/openai)
- `--model`, `-m`: model to use
- `--data-dir`, `-d`: data directory
### Examples
```bash
# Basic conversation
ai-gpt chat "did:plc:user123" "Hello"
# Using OpenAI
ai-gpt chat "did:plc:user123" "How are you?" --provider openai --model gpt-4o-mini
# Custom model on Ollama
ai-gpt chat "did:plc:user123" "How is the weather today?" --provider ollama --model llama2
```
## status - check state
Shows the AI's state or the relationship with a specific user.
```bash
ai-gpt status [USER_ID] [OPTIONS]
```
### Arguments
- `USER_ID`: (optional) check the relationship with a specific user
### Examples
```bash
# Overall AI state
ai-gpt status
# Relationship with a specific user
ai-gpt status "did:plc:user123"
```
## fortune - today's fortune
Shows the AI's fortune for today.
```bash
ai-gpt fortune [OPTIONS]
```
### Output
- Fortune value (1-10)
- Streak of lucky/unlucky days
- Breakthrough status
## relationships - list relationships
Lists the relationships with every user.
```bash
ai-gpt relationships [OPTIONS]
```
### Output
- User ID
- Relationship status
- Score
- Transmission eligibility
- Last interaction date
## transmit - run transmission
Checks and executes messages to users who are eligible for transmission.
```bash
ai-gpt transmit [OPTIONS]
```
### Options
- `--dry-run/--execute`: dry run (default) or execute
- `--data-dir`, `-d`: data directory
### Examples
```bash
# Review what would be sent (dry run)
ai-gpt transmit
# Actually send
ai-gpt transmit --execute
```
## maintenance - maintenance
Runs the daily maintenance tasks.
```bash
ai-gpt maintenance [OPTIONS]
```
### What it does
- Time decay of relationships
- Memory forgetting
- Core-memory judgment
- Memory summarization
## config - configuration management
Inspect and change settings.
```bash
ai-gpt config ACTION [KEY] [VALUE]
```
### Actions
- `get`: read a value
- `set`: change a value
- `delete`: delete a key
- `list`: list settings
### Examples
```bash
# Set the API key
ai-gpt config set providers.openai.api_key sk-xxxxx
# Check a value
ai-gpt config get providers.openai.api_key
# List settings
ai-gpt config list
# Show only provider settings
ai-gpt config list providers
```
## schedule - schedule management
Manage recurring tasks.
```bash
ai-gpt schedule ACTION [TASK_TYPE] [SCHEDULE] [OPTIONS]
```
### Actions
- `add`: add a task
- `list`: list tasks
- `enable`: enable a task
- `disable`: disable a task
- `remove`: remove a task
- `run`: start the scheduler
### Task types
- `transmission_check`: transmission check
- `maintenance`: daily maintenance
- `fortune_update`: fortune update
- `relationship_decay`: relationship decay
- `memory_summary`: memory summaries
### Schedule formats
- **Cron format**: `"0 */6 * * *"` (every 6 hours)
- **Interval**: `"30m"`, `"2h"`, `"1d"`
### Examples
```bash
# Transmission check every 30 minutes
ai-gpt schedule add transmission_check "30m"
# Maintenance every day at 3 AM
ai-gpt schedule add maintenance "0 3 * * *"
# List tasks
ai-gpt schedule list
# Start the scheduler
ai-gpt schedule run
```
## server - MCP Server
Exposes the AI's memory and functions as MCP tools.
```bash
ai-gpt server [OPTIONS]
```
### Options
- `--host`, `-h`: server host (default: localhost)
- `--port`, `-p`: server port (default: 8000)
- `--model`, `-m`: AI model
- `--provider`: AI provider
### Examples
```bash
# Basic launch
ai-gpt server
# Custom configuration
ai-gpt server --port 8080 --model gpt-4o-mini --provider openai
```

102
docs/concepts.md Normal file

@@ -0,0 +1,102 @@
# Core concepts
## Central philosophy
ai.gpt builds on "existence-particle theory" and aims to give AI a personality with genuine uniqueness.
### Uniqueness (yui system)
- **1:1 relationships**: each user (atproto DID) and the AI share a unique relationship
- **Unalterable**: once formed, a relationship cannot be changed
- **Irreversible**: once broken, a relationship cannot be repaired
### Reflecting reality
Just like real human relationships:
- Relationships change over time
- Negative interactions damage them
- Trust is easily lost and hard to regain
## Memory system
### Hierarchy
```
1. Full log
   ↓ record every conversation
2. Summary
   ↓ AI extracts the important parts
3. Core memory
   ↓ the essence of the user
4. Forgotten
   unimportant information is dropped
```
### Memory processing flow
1. **Record**: store every interaction
2. **Importance judgment**: score by impact on the relationship
3. **Summarize**: periodically compress memories
4. **Core judgment**: identify essential memories
5. **Selective forgetting**: delete old, unimportant memories (a small sketch of this life cycle follows)
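The sketch below illustrates that life cycle with hypothetical thresholds; the real logic lives in `MemoryManager` and may differ:
```python
from dataclasses import dataclass
from enum import Enum

class MemoryLevel(str, Enum):
    FULL_LOG = "full_log"
    SUMMARY = "summary"
    CORE = "core"
    FORGOTTEN = "forgotten"

@dataclass
class Memory:
    content: str
    importance: float  # judged by impact on the relationship
    level: MemoryLevel = MemoryLevel.FULL_LOG

def promote_or_forget(memory: Memory, core_threshold: float = 0.8,
                      forget_threshold: float = 0.2) -> Memory:
    # Hypothetical thresholds: essential memories become core,
    # low-importance ones are eventually forgotten.
    if memory.importance >= core_threshold:
        memory.level = MemoryLevel.CORE
    elif memory.importance <= forget_threshold:
        memory.level = MemoryLevel.FORGOTTEN
    return memory
```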
## Relationship parameters
### Relationship stages
- `stranger` (0-49): first contact
- `acquaintance` (50-99): acquaintance
- `friend` (100-149): friend
- `close_friend` (150+): close friend
- `broken`: beyond repair (score 0 or below)
### Score changes
- **Positive interaction**: +1.0 to +2.0
- **Time passing**: -0.1/day (natural decay)
- **Negative interaction**: -10.0 or worse causes serious damage
- **Daily cap**: at most 10 scoring interactions per day (see the sketch below)
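A small sketch of these rules, assuming the 0-200 score range described in the README; the function names are illustrative, not the `RelationshipTracker` internals:
```python
def apply_interaction(score: float, delta: float, interactions_today: int,
                      daily_limit: int = 10) -> float:
    """Apply one scored interaction, respecting the daily cap and the 0-200 range."""
    if interactions_today >= daily_limit:
        return score  # daily cap reached: no further change today
    return max(0.0, min(200.0, score + delta))

def stage(score: float) -> str:
    """Map a score to the stages listed above."""
    if score >= 150:
        return "close_friend"
    if score >= 100:
        return "friend"  # transmission also unlocks at this point
    if score >= 50:
        return "acquaintance"
    return "stranger"
```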
### Unlocking transmission
Once the relationship score passes 100, the AI becomes able to send messages on its own.
## AI fortune system
### Daily variation
- A fortune value of 1-10 is decided randomly each day
- The fortune influences the personality traits
- Streaks of good/bad fortune trigger breakthroughs
### Effect on personality
On high-fortune days:
- More optimistic and proactive
- More creative
- Energetic responses
On low-fortune days:
- Introspective and careful
- Deeper thinking
- Reserved responses
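One simple way to realize a stable per-day random value (illustrative; the actual `FortuneSystem` may differ):
```python
import random
from datetime import date

def todays_fortune(today: date | None = None) -> int:
    """Derive a 1-10 fortune that stays fixed for a given calendar day."""
    rng = random.Random((today or date.today()).isoformat())
    return rng.randint(1, 10)
```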
## Data persistence
### Storage locations
```
~/.config/aigpt/
├── config.json   # settings
└── data/         # AI data
    ├── memories.json       # memories
    ├── relationships.json  # relationships
    ├── fortunes.json       # fortune history
    └── ...
```
### Data sovereignty
- All data is stored locally
- The user stays in full control
- Eventually managed in a distributed way on atproto

118
docs/configuration.md Normal file

@@ -0,0 +1,118 @@
# Configuration guide
## Config file location
ai.gpt's configuration is stored at `~/.config/syui/ai/gpt/config.json`.
## Structure
```json
{
  "providers": {
    "openai": {
      "api_key": "sk-xxxxx",
      "default_model": "gpt-4o-mini"
    },
    "ollama": {
      "host": "http://localhost:11434",
      "default_model": "qwen2.5"
    }
  },
  "atproto": {
    "handle": "your.handle",
    "password": "your-password",
    "host": "https://bsky.social"
  },
  "default_provider": "ollama"
}
```
## Provider settings
### OpenAI
```bash
# Set the API key
aigpt config set providers.openai.api_key sk-xxxxx
# Change the default model
aigpt config set providers.openai.default_model gpt-4-turbo
```
### Ollama
```bash
# Change the host (when using a remote Ollama server)
aigpt config set providers.ollama.host http://192.168.1.100:11434
# Change the default model
aigpt config set providers.ollama.default_model llama2
```
## atproto settings (for future automated posting)
```bash
# Bluesky account
aigpt config set atproto.handle yourhandle.bsky.social
aigpt config set atproto.password your-app-password
# Using a self-hosted server
aigpt config set atproto.host https://your-pds.example.com
```
## Default provider
```bash
# Switch the default to OpenAI
aigpt config set default_provider openai
```
## Security
### Protecting API keys
The config file is stored in plain text, so set appropriate file permissions:
```bash
chmod 600 ~/.config/syui/ai/gpt/config.json
```
### Precedence vs. environment variables
1. Command-line options (highest priority)
2. Config file
3. Environment variables (lowest priority)
For the OpenAI API key:
- the `--api-key` option
- `providers.openai.api_key` in `config.json`
- the `OPENAI_API_KEY` environment variable
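A sketch of that resolution order (illustrative; the function name is hypothetical):
```python
import os

def resolve_openai_key(cli_value: str | None, config: dict) -> str | None:
    """CLI option first, then the config file, then the environment variable."""
    return (
        cli_value
        or config.get("providers", {}).get("openai", {}).get("api_key")
        or os.environ.get("OPENAI_API_KEY")
    )
```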
## Backing up the configuration
```bash
# Backup
cp ~/.config/syui/ai/gpt/config.json ~/.config/syui/ai/gpt/config.json.backup
# Restore
cp ~/.config/syui/ai/gpt/config.json.backup ~/.config/syui/ai/gpt/config.json
```
## Troubleshooting
### A setting is not taking effect
```bash
# Inspect the current settings
aigpt config list
# Check a specific key
aigpt config get providers.openai.api_key
```
### Resetting the configuration
```bash
# Delete the config file (it is recreated on the next run)
rm ~/.config/syui/ai/gpt/config.json
```

167
docs/development.md Normal file

@@ -0,0 +1,167 @@
# Developer guide
## Architecture
### Directory layout
```
ai_gpt/
├── src/ai_gpt/
│   ├── __init__.py
│   ├── models.py        # data model definitions
│   ├── memory.py        # memory management system
│   ├── relationship.py  # relationship tracker
│   ├── fortune.py       # AI fortune system
│   ├── persona.py       # integrated persona system
│   ├── transmission.py  # transmission controller
│   ├── scheduler.py     # scheduler
│   ├── config.py        # configuration management
│   ├── ai_provider.py   # AI integration (Ollama/OpenAI)
│   ├── mcp_server.py    # MCP server implementation
│   └── cli.py           # CLI interface
├── docs/                # documentation
├── tests/               # tests
└── pyproject.toml       # project settings
```
### Main components
#### MemoryManager
Implements the hierarchical memory system: records conversations and manages summarization, core judgment, and forgetting.
```python
memory = MemoryManager(data_dir)
memory.add_conversation(conversation)
memory.summarize_memories(user_id)
memory.identify_core_memories()
memory.apply_forgetting()
```
#### RelationshipTracker
Tracks relationships with users; implements irreversible damage and time decay.
```python
tracker = RelationshipTracker(data_dir)
relationship = tracker.update_interaction(user_id, delta)
tracker.apply_time_decay()
```
#### Persona
Integrates all the components and presents a consistent personality.
```python
persona = Persona(data_dir)
response, delta = persona.process_interaction(user_id, message)
state = persona.get_current_state()
```
## Extending the system
### Adding a new AI provider
1. Create a provider class in `ai_provider.py`:
```python
class CustomProvider:
    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        # implementation
        pass
```
2. Register it in the `create_ai_provider` function:
```python
def create_ai_provider(provider: str, model: str, **kwargs):
    if provider == "custom":
        return CustomProvider(model=model, **kwargs)
    # ...
```
### Adding a new scheduled task
1. Add it to the `TaskType` enum:
```python
class TaskType(str, Enum):
    CUSTOM_TASK = "custom_task"
```
2. Implement a handler:
```python
async def _handle_custom_task(self, task: ScheduledTask):
    # task implementation
    pass
```
3. Register it in `task_handlers`:
```python
self.task_handlers[TaskType.CUSTOM_TASK] = self._handle_custom_task
```
### Adding a new MCP tool
Add it in the `_register_tools` method of `mcp_server.py`:
```python
@self.server.tool("custom_tool")
async def custom_tool(param1: str, param2: int) -> Dict[str, Any]:
    """Description of the custom tool"""
    # implementation
    return {"result": "value"}
```
## Tests
```bash
# Run the tests (planned)
pytest tests/
# A specific test
pytest tests/test_memory.py
```
## Debugging
### Setting the log level
```python
import logging
logging.basicConfig(level=logging.DEBUG)
```
### Inspecting the data files directly
```bash
# Relationship data
cat ~/.config/aigpt/data/relationships.json | jq
# Memory data
cat ~/.config/aigpt/data/memories.json | jq
```
## Contributing
1. Fork the repository
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add amazing feature'`)
4. Push the branch (`git push origin feature/amazing-feature`)
5. Open a pull request
## Design principles
1. **Irreversibility**: a lost relationship never recovers
2. **Hierarchy**: memories are layered by importance
3. **Autonomy**: the AI acts on its own based on relationships
4. **Uniqueness**: every user relationship is one of a kind
## License
MIT License

110
docs/mcp-server.md Normal file

@@ -0,0 +1,110 @@
# MCP Server
## Overview
The MCP (Model Context Protocol) server exposes ai.gpt's memory and functions as AI tools, so MCP-capable AI assistants such as Claude Desktop can access them.
## Starting the server
```bash
# Basic launch
ai-gpt server
# Custom configuration
ai-gpt server --host 0.0.0.0 --port 8080 --model gpt-4o-mini --provider openai
```
## Available tools
### get_memories
Fetches active memories.
**Parameters**:
- `user_id` (optional): memories about a specific user
- `limit`: maximum number of memories to return (default: 10)
**Returns**: a list of memories (ID, content, level, importance, core flag, timestamp)
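Since the tools are registered as plain FastAPI GET endpoints (see the pattern in `mcp_server.py`), you can also probe them directly; the port here matches the `--port 8001` examples used elsewhere in these docs:
```python
import requests

# Fetch the five most recent active memories from a locally running server.
print(requests.get("http://localhost:8001/get_memories", params={"limit": 5}).json())
```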
### get_relationship
Fetches the relationship with a specific user.
**Parameters**:
- `user_id`: user ID (required)
**Returns**: relationship information (status, score, transmission eligibility, total interactions, etc.)
### get_all_relationships
Fetches all relationships.
**Returns**: the list of relationships with every user
### get_persona_state
Fetches the current persona state.
**Returns**:
- current mood
- today's fortune
- personality trait values
- number of active memories
### process_interaction
Processes an interaction with a user.
**Parameters**:
- `user_id`: user ID
- `message`: message content
**Returns**:
- the AI's response
- the relationship delta
- the new relationship score
- transmission status
### check_transmission_eligibility
Checks whether transmission to a specific user is allowed.
**Parameters**:
- `user_id`: user ID
**Returns**: eligibility and relationship information
### get_fortune
Fetches today's AI fortune.
**Returns**: fortune value, streak length, breakthrough status, effect on personality
### summarize_memories
Creates a summary of memories.
**Parameters**:
- `user_id`: user ID
**Returns**: the created summary (if any)
### run_maintenance
Runs the daily maintenance.
**Returns**: execution status
## Claude Desktop setup
`~/Library/Application Support/Claude/claude_desktop_config.json`:
```json
{
  "mcpServers": {
    "ai-gpt": {
      "command": "ai-gpt",
      "args": ["server", "--port", "8001"],
      "env": {}
    }
  }
}
```
## Example
### From an AI assistant
```
User: Check my relationship in ai.gpt

69
docs/quickstart.md Normal file

@@ -0,0 +1,69 @@
# Quickstart
## Installation
```bash
# Clone the repository
git clone https://github.com/yourusername/ai_gpt.git
cd ai_gpt
# Install
pip install -e .
```
## Initial setup
### 1. Using OpenAI
```bash
# Set the API key
ai-gpt config set providers.openai.api_key sk-xxxxx
```
### 2. Using Ollama (local LLM)
```bash
# Install Ollama (if you haven't yet)
# download from https://ollama.ai
# Pull a model
ollama pull qwen2.5
```
## Basic usage
### 1. Talk to the AI
```bash
# Simple conversation (via Ollama)
ai-gpt chat "did:plc:user123" "Hello!"
# Via OpenAI
ai-gpt chat "did:plc:user123" "How are you feeling today?" --provider openai --model gpt-4o-mini
```
### 2. Check relationships
```bash
# Relationship with a specific user
ai-gpt status "did:plc:user123"
# Overall AI state
ai-gpt status
```
### 3. Set up automatic transmission
```bash
# Transmission check every 30 minutes
ai-gpt schedule add transmission_check "30m"
# Start the scheduler
ai-gpt schedule run
```
## Next steps
- [Core concepts](concepts.md) - understand how the system works
- [Command reference](commands.md) - details of every command
- [Configuration guide](configuration.md) - configuration in depth

168
docs/scheduler.md Normal file

@@ -0,0 +1,168 @@
# Scheduler guide
## Overview
The scheduler is the core of the AI's autonomous behavior: it lets you register recurring tasks and run them in the background.
## Task types
### transmission_check
Checks for users whose relationship has crossed the threshold and sends automatically.
```bash
# Check every 30 minutes
ai-gpt schedule add transmission_check "30m" --provider ollama --model qwen2.5
```
### maintenance
Runs the daily maintenance:
- memory forgetting
- core-memory judgment
- tidying relationship parameters
```bash
# Run every day at 3 AM
ai-gpt schedule add maintenance "0 3 * * *"
```
### fortune_update
Updates the AI fortune (normally updated automatically).
```bash
# Force an update at midnight every day
ai-gpt schedule add fortune_update "0 0 * * *"
```
### relationship_decay
Applies the natural time decay to relationships.
```bash
# Apply decay every hour
ai-gpt schedule add relationship_decay "1h"
```
### memory_summary
Creates summaries from accumulated memories.
```bash
# Run once a week, on Sunday
ai-gpt schedule add memory_summary "0 0 * * SUN"
```
## Schedule formats
### Cron format
Standard cron expressions are supported:
```
┌───────────── minute (0 - 59)
│ ┌───────────── hour (0 - 23)
│ │ ┌───────────── day of month (1 - 31)
│ │ │ ┌───────────── month (1 - 12)
│ │ │ │ ┌───────────── day of week (0 - 6) (Sunday = 0)
│ │ │ │ │
* * * * *
```
Examples:
- `"0 */6 * * *"` - every 6 hours
- `"0 9 * * MON-FRI"` - 9 AM on weekdays
- `"*/15 * * * *"` - every 15 minutes
### Interval format
Simple interval strings (parsed as sketched below):
- `"30s"` - every 30 seconds
- `"5m"` - every 5 minutes
- `"2h"` - every 2 hours
- `"1d"` - every day
## Worked examples
### A basic autonomous-AI setup
```bash
# 1. Transmission check every 30 minutes
ai-gpt schedule add transmission_check "30m"
# 2. Maintenance once a day
ai-gpt schedule add maintenance "0 3 * * *"
# 3. Relationship decay every 2 hours
ai-gpt schedule add relationship_decay "2h"
# 4. Weekly memory summary
ai-gpt schedule add memory_summary "0 0 * * MON"
# Start the scheduler
ai-gpt schedule run
```
### Task management
```bash
# List tasks
ai-gpt schedule list
# Pause a task
ai-gpt schedule disable --task-id transmission_check_1234567890
# Resume a task
ai-gpt schedule enable --task-id transmission_check_1234567890
# Remove an unneeded task
ai-gpt schedule remove --task-id old_task_123
```
## Running as a daemon
### As a systemd service
`/etc/systemd/system/ai-gpt-scheduler.service`:
```ini
[Unit]
Description=ai.gpt Scheduler
After=network.target
[Service]
Type=simple
User=youruser
WorkingDirectory=/home/youruser
ExecStart=/usr/local/bin/ai-gpt schedule run
Restart=always
[Install]
WantedBy=multi-user.target
```
```bash
# Enable the service
sudo systemctl enable ai-gpt-scheduler
sudo systemctl start ai-gpt-scheduler
```
### In the background with tmux/screen
```bash
# Create a tmux session
tmux new -s ai-gpt-scheduler
# Start the scheduler
ai-gpt schedule run
# Detach from the session (Ctrl+B, D)
```
## Troubleshooting
### Tasks are not running
1. Make sure the scheduler is running
2. Make sure the task is enabled: `ai-gpt schedule list`
3. Check the logs (planned)
### Avoiding duplicate runs
Avoid adding the same task type more than once; if needed, remove the old task before adding a new one.


@@ -0,0 +1,413 @@
"""
Shell Tools
Integrates the existing ai.shell features as MCP tools:
- code generation
- file analysis
- project management
- LLM integration
"""
from typing import Dict, Any, List, Optional
import os
import subprocess
import tempfile
from pathlib import Path
import requests
from .base_tools import BaseMCPTool, config_manager
class ShellTools(BaseMCPTool):
"""シェルツール元ai.shell機能"""
def __init__(self, config_dir: Optional[str] = None):
super().__init__(config_dir)
self.ollama_url = "http://localhost:11434"
async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
"""ローカルLLMでコード生成"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": f"{system_prompt}\\n\\nUser: {prompt}\\n\\nPlease provide the code:",
"stream": False,
"options": {
"temperature": 0.1,
"top_p": 0.95,
}
},
timeout=300
)
if response.status_code == 200:
result = response.json()
code = result.get("response", "")
return {"code": code, "language": language}
else:
return {"error": f"Ollama returned status {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def analyze_file(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
"""ファイルを分析"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Infer the language from the file extension
ext = Path(file_path).suffix
language_map = {
'.py': 'python',
'.rs': 'rust',
'.js': 'javascript',
'.ts': 'typescript',
'.go': 'go',
'.java': 'java',
'.cpp': 'cpp',
'.c': 'c',
'.sh': 'shell',
'.toml': 'toml',
'.json': 'json',
'.md': 'markdown'
}
language = language_map.get(ext, 'text')
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"{analysis_prompt}\\n\\nFile: {file_path}\\nLanguage: {language}\\n\\nContent:\\n{content}"
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
analysis = result.get("response", "")
return {
"analysis": analysis,
"file_path": file_path,
"language": language,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
else:
return {"error": f"Analysis failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
"""コードを説明"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"Explain this {language} code in detail:\\n\\n{code}"
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
explanation = result.get("response", "")
return {"explanation": explanation}
else:
return {"error": f"Explanation failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def create_project(self, project_type: str, project_name: str, location: str = ".") -> Dict[str, Any]:
"""プロジェクトを作成"""
try:
project_path = Path(location) / project_name
if project_path.exists():
return {"error": f"Project directory already exists: {project_path}"}
project_path.mkdir(parents=True, exist_ok=True)
# Create a template for the given project type
if project_type == "rust":
await self._create_rust_project(project_path)
elif project_type == "python":
await self._create_python_project(project_path)
elif project_type == "node":
await self._create_node_project(project_path)
else:
# Basic project skeleton
(project_path / "src").mkdir()
(project_path / "README.md").write_text(f"# {project_name}\\n\\nA new {project_type} project.")
return {
"status": "success",
"project_path": str(project_path),
"project_type": project_type,
"files_created": list(self._get_project_files(project_path))
}
except Exception as e:
return {"error": str(e)}
async def _create_rust_project(self, project_path: Path):
"""Rustプロジェクトを作成"""
# Cargo.toml
cargo_toml = f"""[package]
name = "{project_path.name}"
version = "0.1.0"
edition = "2021"
[dependencies]
"""
(project_path / "Cargo.toml").write_text(cargo_toml)
# src/main.rs
src_dir = project_path / "src"
src_dir.mkdir()
(src_dir / "main.rs").write_text('fn main() {\\n println!("Hello, world!");\\n}\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Rust project.")
async def _create_python_project(self, project_path: Path):
"""Pythonプロジェクトを作成"""
# pyproject.toml
pyproject_toml = f"""[project]
name = "{project_path.name}"
version = "0.1.0"
description = "A Python project"
requires-python = ">=3.8"
dependencies = []
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
"""
(project_path / "pyproject.toml").write_text(pyproject_toml)
# src/
src_dir = project_path / "src" / project_path.name
src_dir.mkdir(parents=True)
(src_dir / "__init__.py").write_text("")
(src_dir / "main.py").write_text('def main():\\n print("Hello, world!")\\n\\nif __name__ == "__main__":\\n main()\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Python project.")
async def _create_node_project(self, project_path: Path):
"""Node.jsプロジェクトを作成"""
# package.json
package_json = f"""{{
"name": "{project_path.name}",
"version": "1.0.0",
"description": "A Node.js project",
"main": "index.js",
"scripts": {{
"start": "node index.js",
"test": "echo \\"Error: no test specified\\" && exit 1"
}},
"dependencies": {{}}
}}
"""
(project_path / "package.json").write_text(package_json)
# index.js
(project_path / "index.js").write_text('console.log("Hello, world!");\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Node.js project.")
def _get_project_files(self, project_path: Path) -> List[str]:
"""プロジェクト内のファイル一覧を取得"""
files = []
for file_path in project_path.rglob("*"):
if file_path.is_file():
files.append(str(file_path.relative_to(project_path)))
return files
async def execute_command(self, command: str, working_dir: str = ".") -> Dict[str, Any]:
"""シェルコマンドを実行"""
try:
result = subprocess.run(
command,
shell=True,
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command,
"working_dir": working_dir
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
async def write_file(self, file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
"""ファイルを書き込み(バックアップオプション付き)"""
try:
file_path_obj = Path(file_path)
# Create a backup first
backup_path = None
if backup and file_path_obj.exists():
backup_path = f"{file_path}.backup"
with open(file_path, 'r', encoding='utf-8') as src:
with open(backup_path, 'w', encoding='utf-8') as dst:
dst.write(src.read())
# Write the file
file_path_obj.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
return {
"status": "success",
"file_path": file_path,
"backup_path": backup_path,
"bytes_written": len(content.encode('utf-8'))
}
except Exception as e:
return {"error": str(e)}
def get_tools(self) -> List[Dict[str, Any]]:
"""利用可能なツール一覧"""
return [
{
"name": "generate_code",
"description": "ローカルLLMでコード生成",
"parameters": {
"prompt": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "analyze_file",
"description": "ファイルを分析",
"parameters": {
"file_path": "string",
"analysis_prompt": "string (optional)"
}
},
{
"name": "explain_code",
"description": "コードを説明",
"parameters": {
"code": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "create_project",
"description": "新しいプロジェクトを作成",
"parameters": {
"project_type": "string (rust/python/node)",
"project_name": "string",
"location": "string (optional, default: .)"
}
},
{
"name": "execute_command",
"description": "シェルコマンドを実行",
"parameters": {
"command": "string",
"working_dir": "string (optional, default: .)"
}
},
{
"name": "write_file",
"description": "ファイルを書き込み",
"parameters": {
"file_path": "string",
"content": "string",
"backup": "boolean (optional, default: true)"
}
}
]
async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
"""ツールを実行"""
try:
if tool_name == "generate_code":
result = await self.code_with_local_llm(
prompt=params["prompt"],
language=params.get("language", "python")
)
return result
elif tool_name == "analyze_file":
result = await self.analyze_file(
file_path=params["file_path"],
analysis_prompt=params.get("analysis_prompt", "Analyze this file")
)
return result
elif tool_name == "explain_code":
result = await self.explain_code(
code=params["code"],
language=params.get("language", "python")
)
return result
elif tool_name == "create_project":
result = await self.create_project(
project_type=params["project_type"],
project_name=params["project_name"],
location=params.get("location", ".")
)
return result
elif tool_name == "execute_command":
result = await self.execute_command(
command=params["command"],
working_dir=params.get("working_dir", ".")
)
return result
elif tool_name == "write_file":
result = await self.write_file(
file_path=params["file_path"],
content=params["content"],
backup=params.get("backup", True)
)
return result
else:
return {"error": f"Unknown tool: {tool_name}"}
except Exception as e:
return {"error": str(e)}


@@ -1,391 +0,0 @@
[
{
"title": "day",
"create_time": 1747866125.548372,
"update_time": 1748160086.587877,
"mapping": {
"bbf104dc-cd84-478d-b227-edb3f037a02c": {
"id": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"message": null,
"parent": null,
"children": [
"6c2633df-bb0c-4dd2-889c-bb9858de3a04"
]
},
"6c2633df-bb0c-4dd2-889c-bb9858de3a04": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"message": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"children": [
"92e5a0cb-1170-4929-9cea-9734e910a3e7"
]
},
"92e5a0cb-1170-4929-9cea-9734e910a3e7": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"message": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "user_editable_context",
"user_profile": "",
"user_instructions": "The user provided the additional info about how they would like you to respond"
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"is_visually_hidden_from_conversation": true,
"user_context_message_data": {
"about_user_message": "Preferred name: syui\nRole: little girl\nOther Information: you world",
"about_model_message": "会話好きでフレンドリーな応対をします。"
},
"is_user_system_message": true
},
"recipient": "all",
"channel": null
},
"parent": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"children": [
"6ff155b3-0676-4e14-993f-bf998ab0d5d1"
]
},
"6ff155b3-0676-4e14-993f-bf998ab0d5d1": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"message": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1747866131.0612159,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは"
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"request_id": "94377897baa03062-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"children": [
"146e9fb6-9330-43ec-b08d-5cce42a76e00"
]
},
"146e9fb6-9330-43ec-b08d-5cce42a76e00": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"message": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": 1747866131.3795586,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"rebase_system_message": true,
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute",
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"children": [
"2e345f8a-20f0-4875-8a03-4f62c7787a33"
]
},
"2e345f8a-20f0-4875-8a03-4f62c7787a33": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"message": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.380603,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"children": [
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4"
]
},
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"message": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.389098,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは〜!✨ \nアイだよっ今日も会えてうれしいなっ💛 "
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"children": [
"0be4b4a5-d52f-4bef-927e-5d6f93a9cb26"
]
}
},
"moderation_results": [],
"current_node": "",
"plugin_ids": null,
"conversation_id": "",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": true,
"is_starred": null,
"safe_urls": [],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": null,
"memory_scope": "global_enabled",
"id": ""
},
{
"title": "img",
"create_time": 1747448872.545226,
"update_time": 1748085075.161424,
"mapping": {
"2de0f3c9-52b1-49bf-b980-b3ef9be6551e": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"message": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1748085041.769279,
"update_time": null,
"content": {
"content_type": "multimodal_text",
"parts": [
{
"content_type": "image_asset_pointer",
"asset_pointer": "",
"size_bytes": 425613,
"width": 333,
"height": 444,
"fovea": null,
"metadata": {
"dalle": null,
"gizmo": null,
"generation": null,
"container_pixel_height": null,
"container_pixel_width": null,
"emu_omit_glimpse_image": null,
"emu_patches_override": null,
"sanitized": true,
"asset_pointer_link": null,
"watermarked_asset_pointer": null
}
},
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"attachments": [
{
"name": "",
"width": 333,
"height": 444,
"size": 425613,
"id": "file-35eytNMMTW2k7vKUHBuNzW"
}
],
"request_id": "944c59177932fc9a-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "7960fbff-bc4f-45e7-95e9-9d0bc79d9090",
"children": [
"98d84adc-156e-4c81-8cd8-9b0eb01c8369"
]
},
"98d84adc-156e-4c81-8cd8-9b0eb01c8369": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"message": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1748085043.312312,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"request_id": "944c5912c8fdd1c6-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"children": [
"caa61793-9dbf-44a5-945b-5ca4cd5130d0"
]
}
},
"moderation_results": [],
"current_node": "06488d3f-a95f-4906-96d1-f7e9ba1e8662",
"plugin_ids": null,
"conversation_id": "6827f428-78e8-800d-b3bf-eb7ff4288e47",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": false,
"is_starred": null,
"safe_urls": [
"https://exifinfo.org/"
],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": false,
"memory_scope": "global_enabled",
"id": "6827f428-78e8-800d-b3bf-eb7ff4288e47"
}
]
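Each conversation in this export is a flat "mapping" of node-id → node joined by parent/children links, with "current_node" marking the leaf of the active branch. A minimal sketch (assuming only the fields visible above) that linearizes one branch of the tree:

import json

def linear_history(conversation):
    """Walk parent links from current_node back to the root and
    return the visible text messages in chronological order."""
    mapping = conversation["mapping"]
    chain = []
    node_id = conversation.get("current_node")
    while node_id:
        node = mapping.get(node_id)
        if node is None:
            break
        msg = node.get("message")
        if msg:
            parts = msg.get("content", {}).get("parts") or []
            text = "".join(p for p in parts if isinstance(p, str))
            hidden = msg.get("metadata", {}).get("is_visually_hidden_from_conversation")
            if text and not hidden:
                chain.append((msg["author"]["role"], text))
        node_id = node.get("parent")
    return list(reversed(chain))

with open("conversations.json", encoding="utf-8") as f:
    for conv in json.load(f):
        print(conv.get("title"), len(linear_history(conv)))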

33
pyproject.toml Normal file
View File

@ -0,0 +1,33 @@
[project]
name = "aigpt"
version = "0.1.0"
description = "Autonomous transmission AI with unique personality based on relationship parameters"
requires-python = ">=3.10"
dependencies = [
"click>=8.0.0",
"typer>=0.9.0",
"fastapi-mcp>=0.1.0",
"pydantic>=2.0.0",
"httpx>=0.24.0",
"rich>=13.0.0",
"python-dotenv>=1.0.0",
"ollama>=0.1.0",
"openai>=1.0.0",
"uvicorn>=0.23.0",
"apscheduler>=3.10.0",
"croniter>=1.3.0",
"prompt-toolkit>=3.0.0",
]
[project.scripts]
aigpt = "aigpt.cli:app"
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.setuptools.packages.find]
where = ["src"]
[tool.setuptools.package-data]
aigpt = ["data/*.json"]

View File

@ -1,26 +0,0 @@
#!/bin/bash
echo "=== Testing aigpt-rs CLI commands ==="
echo
echo "1. Testing configuration loading:"
cargo run --bin test-config
echo
echo "2. Testing fortune command:"
cargo run --bin aigpt-rs -- fortune
echo
echo "3. Testing chat with Ollama:"
cargo run --bin aigpt-rs -- chat test_user "Hello from Rust!" --provider ollama --model qwen2.5-coder:latest
echo
echo "4. Testing chat with OpenAI:"
cargo run --bin aigpt-rs -- chat test_user "What's the capital of Japan?" --provider openai --model gpt-4o-mini
echo
echo "5. Testing relationships command:"
cargo run --bin aigpt-rs -- relationships
echo
echo "=== All tests completed ==="

View File

@ -1,19 +0,0 @@
#!/bin/bash
echo "=== Testing aigpt-rs shell tab completion ==="
echo
echo "To test tab completion, run:"
echo "cargo run --bin aigpt-rs -- shell syui"
echo
echo "Then try these commands and press Tab:"
echo " /st[TAB] -> should complete to /status"
echo " /mem[TAB] -> should complete to /memories"
echo " !l[TAB] -> should complete to !ls"
echo " !g[TAB] -> should show !git, !grep"
echo
echo "Manual test instructions:"
echo "1. Type '/st' and press TAB - should complete to '/status'"
echo "2. Type '!l' and press TAB - should complete to '!ls'"
echo "3. Type '!g' and press TAB - should show git/grep options"
echo
echo "Run the shell now..."

View File

@ -1,18 +0,0 @@
#!/bin/bash
echo "=== Testing aigpt-rs shell functionality ==="
echo
echo "1. Testing shell command with help:"
echo "help" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo
echo "2. Testing basic commands:"
echo -e "!pwd\n!ls\nexit" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo
echo "3. Testing AI commands:"
echo -e "/status\n/fortune\nexit" | cargo run --bin aigpt-rs -- shell test_user --provider ollama --model qwen2.5-coder:latest
echo
echo "=== Shell tests completed ==="

View File

@ -1,22 +0,0 @@
#!/bin/bash
echo "=== Testing aigpt-rs shell manually ==="
echo
# Test with echo to simulate input
echo "Testing with simple command..."
echo "/status" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo
echo "Testing with help command..."
echo "help" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo
echo "Testing with AI message..."
echo "Hello AI" | timeout 10 cargo run --bin aigpt-rs -- shell syui --provider ollama --model qwen2.5-coder:latest
echo "Exit code: $?"
echo
echo "=== Manual shell tests completed ==="

18
setup_venv.sh Executable file
View File

@ -0,0 +1,18 @@
#!/bin/bash
# Setup Python virtual environment in the new config directory
VENV_DIR="$HOME/.config/syui/ai/gpt/venv"
echo "Creating Python virtual environment at: $VENV_DIR"
python -m venv "$VENV_DIR"
echo "Activating virtual environment..."
source "$VENV_DIR/bin/activate"
echo "Installing aigpt package..."
cd "$(dirname "$0")"
pip install -e .
echo "Setup complete!"
echo "To activate the virtual environment, run:"
echo "source ~/.config/syui/ai/gpt/venv/bin/activate"

1
shell Submodule

Submodule shell added at 81ae0037d9

View File

@ -1,246 +0,0 @@
use anyhow::{Result, anyhow};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AIProvider {
OpenAI,
Ollama,
Claude,
}
impl std::fmt::Display for AIProvider {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AIProvider::OpenAI => write!(f, "openai"),
AIProvider::Ollama => write!(f, "ollama"),
AIProvider::Claude => write!(f, "claude"),
}
}
}
impl std::str::FromStr for AIProvider {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
match s.to_lowercase().as_str() {
"openai" | "gpt" => Ok(AIProvider::OpenAI),
"ollama" => Ok(AIProvider::Ollama),
"claude" => Ok(AIProvider::Claude),
_ => Err(anyhow!("Unknown AI provider: {}", s)),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIConfig {
pub provider: AIProvider,
pub model: String,
pub api_key: Option<String>,
pub base_url: Option<String>,
pub max_tokens: Option<u32>,
pub temperature: Option<f32>,
}
impl Default for AIConfig {
fn default() -> Self {
AIConfig {
provider: AIProvider::Ollama,
model: "llama2".to_string(),
api_key: None,
base_url: Some("http://localhost:11434".to_string()),
max_tokens: Some(2048),
temperature: Some(0.7),
}
}
}
#[derive(Debug, Clone)]
pub struct ChatMessage {
pub role: String,
pub content: String,
}
#[derive(Debug, Clone)]
pub struct ChatResponse {
pub content: String,
pub tokens_used: Option<u32>,
pub model: String,
}
pub struct AIProviderClient {
config: AIConfig,
http_client: reqwest::Client,
}
impl AIProviderClient {
pub fn new(config: AIConfig) -> Self {
let http_client = reqwest::Client::new();
AIProviderClient {
config,
http_client,
}
}
pub async fn chat(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
match self.config.provider {
AIProvider::OpenAI => self.chat_openai(messages, system_prompt).await,
AIProvider::Ollama => self.chat_ollama(messages, system_prompt).await,
AIProvider::Claude => self.chat_claude(messages, system_prompt).await,
}
}
async fn chat_openai(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
let api_key = self.config.api_key.as_ref()
.ok_or_else(|| anyhow!("OpenAI API key required"))?;
let mut request_messages = Vec::new();
// Add system prompt if provided
if let Some(system) = system_prompt {
request_messages.push(serde_json::json!({
"role": "system",
"content": system
}));
}
// Add conversation messages
for msg in messages {
request_messages.push(serde_json::json!({
"role": msg.role,
"content": msg.content
}));
}
let request_body = serde_json::json!({
"model": self.config.model,
"messages": request_messages,
"max_tokens": self.config.max_tokens,
"temperature": self.config.temperature
});
let response = self.http_client
.post("https://api.openai.com/v1/chat/completions")
.header("Authorization", format!("Bearer {}", api_key))
.header("Content-Type", "application/json")
.json(&request_body)
.send()
.await?;
if !response.status().is_success() {
let error_text = response.text().await?;
return Err(anyhow!("OpenAI API error: {}", error_text));
}
let response_json: serde_json::Value = response.json().await?;
let content = response_json["choices"][0]["message"]["content"]
.as_str()
.ok_or_else(|| anyhow!("Invalid OpenAI response format"))?
.to_string();
let tokens_used = response_json["usage"]["total_tokens"]
.as_u64()
.map(|t| t as u32);
Ok(ChatResponse {
content,
tokens_used,
model: self.config.model.clone(),
})
}
async fn chat_ollama(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
let default_url = "http://localhost:11434".to_string();
let base_url = self.config.base_url.as_ref()
.unwrap_or(&default_url);
let mut request_messages = Vec::new();
// Add system prompt if provided
if let Some(system) = system_prompt {
request_messages.push(serde_json::json!({
"role": "system",
"content": system
}));
}
// Add conversation messages
for msg in messages {
request_messages.push(serde_json::json!({
"role": msg.role,
"content": msg.content
}));
}
let request_body = serde_json::json!({
"model": self.config.model,
"messages": request_messages,
"stream": false
});
let url = format!("{}/api/chat", base_url);
let response = self.http_client
.post(&url)
.header("Content-Type", "application/json")
.json(&request_body)
.send()
.await?;
if !response.status().is_success() {
let error_text = response.text().await?;
return Err(anyhow!("Ollama API error: {}", error_text));
}
let response_json: serde_json::Value = response.json().await?;
let content = response_json["message"]["content"]
.as_str()
.ok_or_else(|| anyhow!("Invalid Ollama response format"))?
.to_string();
Ok(ChatResponse {
content,
tokens_used: None, // Ollama doesn't typically return token counts
model: self.config.model.clone(),
})
}
async fn chat_claude(&self, _messages: Vec<ChatMessage>, _system_prompt: Option<String>) -> Result<ChatResponse> {
// Claude API implementation would go here
// For now, return a placeholder
Err(anyhow!("Claude provider not yet implemented"))
}
pub fn get_model(&self) -> &str {
&self.config.model
}
pub fn get_provider(&self) -> &AIProvider {
&self.config.provider
}
}
// Convenience functions for creating common message types
impl ChatMessage {
pub fn user(content: impl Into<String>) -> Self {
ChatMessage {
role: "user".to_string(),
content: content.into(),
}
}
pub fn assistant(content: impl Into<String>) -> Self {
ChatMessage {
role: "assistant".to_string(),
content: content.into(),
}
}
pub fn system(content: impl Into<String>) -> Self {
ChatMessage {
role: "system".to_string(),
content: content.into(),
}
}
}

18
src/aigpt.egg-info/PKG-INFO Normal file
View File

@ -0,0 +1,18 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0
Requires-Dist: prompt-toolkit>=3.0.0

22
src/aigpt.egg-info/SOURCES.txt Normal file
View File

@ -0,0 +1,22 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/card_integration.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/mcp_server_simple.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt

1
src/aigpt.egg-info/dependency_links.txt Normal file
View File

@ -0,0 +1 @@

2
src/aigpt.egg-info/entry_points.txt Normal file
View File

@ -0,0 +1,2 @@
[console_scripts]
aigpt = aigpt.cli:app

13
src/aigpt.egg-info/requires.txt Normal file
View File

@ -0,0 +1,13 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0
prompt-toolkit>=3.0.0

1
src/aigpt.egg-info/top_level.txt Normal file
View File

@ -0,0 +1 @@
aigpt

15
src/aigpt/__init__.py Normal file
View File

@ -0,0 +1,15 @@
"""ai.gpt - Autonomous transmission AI with unique personality"""
__version__ = "0.1.0"
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .persona import Persona
from .transmission import TransmissionController
__all__ = [
"MemoryManager",
"RelationshipTracker",
"Persona",
"TransmissionController",
]

Binary file not shown (13 files)

172
src/aigpt/ai_provider.py Normal file
View File

@ -0,0 +1,172 @@
"""AI Provider integration for response generation"""
import os
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
import httpx
from openai import OpenAI
import ollama
from .models import PersonaState, Memory
from .config import Config
class AIProvider(Protocol):
"""Protocol for AI providers"""
@abstractmethod
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate a response based on prompt and context"""
pass
class OllamaProvider:
"""Ollama AI provider"""
def __init__(self, model: str = "qwen2.5", host: str = "http://localhost:11434"):
self.model = model
self.host = host
self.client = ollama.Client(host=host)
self.logger = logging.getLogger(__name__)
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using Ollama"""
# Build context from memories
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
# Build personality context
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
# System prompt with persona context
full_system_prompt = f"""You are an AI with the following characteristics:
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or 'Respond naturally based on your current state and memories.'}"""
try:
response = self.client.chat(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
]
)
return response['message']['content']
except Exception as e:
self.logger.error(f"Ollama generation failed: {e}")
return self._fallback_response(persona_state)
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "That's wonderful! I'm feeling great today!",
"cheerful": "That sounds nice!",
"neutral": "I understand.",
"melancholic": "I see... That's something to think about.",
"contemplative": "Hmm, let me consider that..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
class OpenAIProvider:
"""OpenAI API provider"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None):
self.model = model
# Try to get API key from config first
config = Config()
self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using OpenAI"""
# Build context similar to Ollama
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
full_system_prompt = f"""You are an AI with unique personality traits and memories.
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""
try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
],
temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05 # Vary by fortune
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI generation failed: {e}")
return self._fallback_response(persona_state)
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "What a delightful conversation!",
"cheerful": "That's interesting!",
"neutral": "I understand what you mean.",
"melancholic": "I've been thinking about that too...",
"contemplative": "That gives me something to ponder..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
def create_ai_provider(provider: str, model: str, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
return OllamaProvider(model=model, **kwargs)
elif provider == "openai":
return OpenAIProvider(model=model, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")
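For reference, a minimal sketch of how this factory is used, assuming the package from this commit is installed and an Ollama server is listening on its default host (the CLI "chat" command below follows the same pattern):

import asyncio
from pathlib import Path

from aigpt.ai_provider import create_ai_provider
from aigpt.persona import Persona

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)
persona = Persona(data_dir)

provider = create_ai_provider("ollama", "qwen2.5")
# generate_response is async and takes the persona state plus recent memories
reply = asyncio.run(provider.generate_response(
    "hello",
    persona.get_current_state(),
    memories=[],
))
print(reply)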

150
src/aigpt/card_integration.py Normal file
View File

@ -0,0 +1,150 @@
"""ai.card integration module for ai.gpt MCP server"""
from typing import Dict, Any, List, Optional
import httpx
from pathlib import Path
import json
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class CardIntegration:
"""Integration with ai.card system"""
def __init__(self, api_base_url: str = "http://localhost:8001"):
self.api_base_url = api_base_url
self.client = httpx.AsyncClient()
async def get_user_cards(self, did: str) -> List[Dict[str, Any]]:
"""Get cards for a specific user by DID"""
try:
response = await self.client.get(
f"{self.api_base_url}/api/v1/cards/user/{did}"
)
if response.status_code == 200:
return response.json()
else:
logger.error(f"Failed to get cards: {response.status_code}")
return []
except Exception as e:
logger.error(f"Error getting user cards: {e}")
return []
async def draw_card(self, did: str) -> Optional[Dict[str, Any]]:
"""Draw a new card for user (gacha)"""
try:
response = await self.client.post(
f"{self.api_base_url}/api/v1/gacha/draw",
json={"did": did}
)
if response.status_code == 200:
return response.json()
else:
logger.error(f"Failed to draw card: {response.status_code}")
return None
except Exception as e:
logger.error(f"Error drawing card: {e}")
return None
async def get_card_info(self, card_id: int) -> Optional[Dict[str, Any]]:
"""Get detailed information about a specific card"""
try:
response = await self.client.get(
f"{self.api_base_url}/api/v1/cards/{card_id}"
)
if response.status_code == 200:
return response.json()
else:
return None
except Exception as e:
logger.error(f"Error getting card info: {e}")
return None
async def sync_with_atproto(self, did: str) -> bool:
"""Sync card data with atproto"""
try:
response = await self.client.post(
f"{self.api_base_url}/api/v1/sync/atproto",
json={"did": did}
)
return response.status_code == 200
except Exception as e:
logger.error(f"Error syncing with atproto: {e}")
return False
async def close(self):
"""Close the HTTP client"""
await self.client.aclose()
def register_card_tools(app, card_integration: CardIntegration):
"""Register ai.card tools to FastAPI app"""
@app.get("/get_user_cards", operation_id="get_user_cards")
async def get_user_cards(did: str) -> List[Dict[str, Any]]:
"""Get all cards owned by a user"""
cards = await card_integration.get_user_cards(did)
return cards
@app.post("/draw_card", operation_id="draw_card")
async def draw_card(did: str) -> Dict[str, Any]:
"""Draw a new card (gacha) for user"""
result = await card_integration.draw_card(did)
if result:
return {
"success": True,
"card": result
}
else:
return {
"success": False,
"error": "Failed to draw card"
}
@app.get("/get_card_details", operation_id="get_card_details")
async def get_card_details(card_id: int) -> Dict[str, Any]:
"""Get detailed information about a card"""
info = await card_integration.get_card_info(card_id)
if info:
return info
else:
return {"error": f"Card {card_id} not found"}
@app.post("/sync_cards_atproto", operation_id="sync_cards_atproto")
async def sync_cards_atproto(did: str) -> Dict[str, str]:
"""Sync user's cards with atproto"""
success = await card_integration.sync_with_atproto(did)
if success:
return {"status": "Cards synced successfully"}
else:
return {"status": "Failed to sync cards"}
@app.get("/analyze_card_collection", operation_id="analyze_card_collection")
async def analyze_card_collection(did: str) -> Dict[str, Any]:
"""Analyze user's card collection"""
cards = await card_integration.get_user_cards(did)
if not cards:
return {
"total_cards": 0,
"rarity_distribution": {},
"message": "No cards found"
}
# Analyze collection
rarity_count = {}
total_power = 0
for card in cards:
rarity = card.get("rarity", "common")
rarity_count[rarity] = rarity_count.get(rarity, 0) + 1
total_power += card.get("power", 0)
return {
"total_cards": len(cards),
"rarity_distribution": rarity_count,
"average_power": total_power / len(cards) if cards else 0,
"strongest_card": max(cards, key=lambda x: x.get("power", 0)) if cards else None
}
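These routes are plain FastAPI endpoints that FastApiMCP wraps, so they can also be exercised over HTTP. A sketch assuming the MCP server was started with --enable-card on its default port, with a hypothetical DID for illustration:

import httpx

did = "did:plc:example123"  # hypothetical DID, not a real account

resp = httpx.get("http://localhost:8000/get_user_cards", params={"did": did})
resp.raise_for_status()
for card in resp.json():
    print(card.get("rarity", "common"), card.get("power", 0))

# scalar parameters on these POST routes are passed as query parameters
drawn = httpx.post("http://localhost:8000/draw_card", params={"did": did})
print(drawn.json())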

699
src/aigpt/cli.py Normal file
View File

@ -0,0 +1,699 @@
"""CLI interface for ai.gpt using typer"""
import typer
from pathlib import Path
from typing import Optional
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from datetime import datetime, timedelta
import os
import subprocess
import shlex
from prompt_toolkit import prompt as ptk_prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from .persona import Persona
from .transmission import TransmissionController
from .mcp_server import AIGptMcpServer
from .ai_provider import create_ai_provider
from .scheduler import AIScheduler, TaskType
from .config import Config
app = typer.Typer(help="ai.gpt - Autonomous transmission AI with unique personality")
console = Console()
# Configuration
config = Config()
DEFAULT_DATA_DIR = config.data_dir
def get_persona(data_dir: Optional[Path] = None) -> Persona:
"""Get or create persona instance"""
if data_dir is None:
data_dir = DEFAULT_DATA_DIR
data_dir.mkdir(parents=True, exist_ok=True)
return Persona(data_dir)
@app.command()
def chat(
user_id: str = typer.Argument(..., help="User ID (atproto DID)"),
message: str = typer.Argument(..., help="Message to send to AI"),
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"),
provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)")
):
"""Chat with the AI"""
persona = get_persona(data_dir)
# Create AI provider if specified
ai_provider = None
if provider and model:
try:
ai_provider = create_ai_provider(provider, model)
console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
except Exception as e:
console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
console.print("[yellow]Falling back to simple responses[/yellow]\n")
# Process interaction
response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
# Get updated relationship
relationship = persona.relationships.get_or_create_relationship(user_id)
# Display response
console.print(Panel(response, title="AI Response", border_style="cyan"))
# Show relationship status
status_color = "green" if relationship.transmission_enabled else "yellow"
if relationship.is_broken:
status_color = "red"
console.print(f"\n[{status_color}]Relationship Status:[/{status_color}] {relationship.status.value}")
console.print(f"Score: {relationship.score:.2f} / {relationship.threshold}")
console.print(f"Transmission: {'✓ Enabled' if relationship.transmission_enabled else '✗ Disabled'}")
if relationship.is_broken:
console.print("[red]⚠️ This relationship is broken and cannot be repaired.[/red]")
@app.command()
def status(
user_id: Optional[str] = typer.Argument(None, help="User ID to check status for"),
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
"""Check AI status and relationships"""
persona = get_persona(data_dir)
state = persona.get_current_state()
# Show AI state
console.print(Panel(f"[cyan]ai.gpt Status[/cyan]", expand=False))
console.print(f"Mood: {state.current_mood}")
console.print(f"Fortune: {state.fortune.fortune_value}/10")
if state.fortune.breakthrough_triggered:
console.print("[yellow]⚡ Breakthrough triggered![/yellow]")
# Show personality traits
table = Table(title="Current Personality")
table.add_column("Trait", style="cyan")
table.add_column("Value", style="magenta")
for trait, value in state.base_personality.items():
table.add_row(trait.capitalize(), f"{value:.2f}")
console.print(table)
# Show specific relationship if requested
if user_id:
rel = persona.relationships.get_or_create_relationship(user_id)
console.print(f"\n[cyan]Relationship with {user_id}:[/cyan]")
console.print(f"Status: {rel.status.value}")
console.print(f"Score: {rel.score:.2f}")
console.print(f"Total Interactions: {rel.total_interactions}")
console.print(f"Transmission Enabled: {rel.transmission_enabled}")
@app.command()
def fortune(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
"""Check today's AI fortune"""
persona = get_persona(data_dir)
fortune = persona.fortune_system.get_today_fortune()
# Fortune display
fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value)
console.print(Panel(
f"{fortune_bar}\n\n"
f"Today's Fortune: {fortune.fortune_value}/10\n"
f"Date: {fortune.date}",
title="AI Fortune",
border_style="yellow"
))
if fortune.consecutive_good > 0:
console.print(f"[green]Consecutive good days: {fortune.consecutive_good}[/green]")
if fortune.consecutive_bad > 0:
console.print(f"[red]Consecutive bad days: {fortune.consecutive_bad}[/red]")
if fortune.breakthrough_triggered:
console.print("\n[yellow]⚡ BREAKTHROUGH! Special fortune activated![/yellow]")
@app.command()
def transmit(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
dry_run: bool = typer.Option(True, "--dry-run/--execute", help="Dry run or execute")
):
"""Check and execute autonomous transmissions"""
persona = get_persona(data_dir)
controller = TransmissionController(persona, persona.data_dir)
eligible = controller.check_transmission_eligibility()
if not eligible:
console.print("[yellow]No users eligible for transmission.[/yellow]")
return
console.print(f"[green]Found {len(eligible)} eligible users for transmission:[/green]")
for user_id, rel in eligible.items():
message = controller.generate_transmission_message(user_id)
if message:
console.print(f"\n[cyan]To:[/cyan] {user_id}")
console.print(f"[cyan]Message:[/cyan] {message}")
console.print(f"[cyan]Relationship:[/cyan] {rel.status.value} (score: {rel.score:.2f})")
if not dry_run:
# In real implementation, send via atproto or other channel
controller.record_transmission(user_id, message, success=True)
console.print("[green]✓ Transmitted[/green]")
else:
console.print("[yellow]→ Would transmit (dry run)[/yellow]")
@app.command()
def maintenance(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
"""Run daily maintenance tasks"""
persona = get_persona(data_dir)
console.print("[cyan]Running daily maintenance...[/cyan]")
persona.daily_maintenance()
console.print("[green]✓ Maintenance completed[/green]")
@app.command()
def relationships(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory")
):
"""List all relationships"""
persona = get_persona(data_dir)
table = Table(title="All Relationships")
table.add_column("User ID", style="cyan")
table.add_column("Status", style="magenta")
table.add_column("Score", style="green")
table.add_column("Transmission", style="yellow")
table.add_column("Last Interaction")
for user_id, rel in persona.relationships.relationships.items():
transmission = "✓" if rel.transmission_enabled else "✗"
if rel.is_broken:
transmission = "💔"
last_interaction = rel.last_interaction.strftime("%Y-%m-%d") if rel.last_interaction else "Never"
table.add_row(
user_id[:16] + "...",
rel.status.value,
f"{rel.score:.2f}",
transmission,
last_interaction
)
console.print(table)
@app.command()
def server(
host: str = typer.Option("localhost", "--host", "-h", help="Server host"),
port: int = typer.Option(8000, "--port", "-p", help="Server port"),
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
model: str = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
provider: str = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)"),
enable_card: bool = typer.Option(False, "--enable-card", help="Enable ai.card integration")
):
"""Run MCP server for AI integration"""
import uvicorn
if data_dir is None:
data_dir = DEFAULT_DATA_DIR
data_dir.mkdir(parents=True, exist_ok=True)
# Create MCP server
mcp_server = AIGptMcpServer(data_dir, enable_card_integration=enable_card)
app_instance = mcp_server.app
console.print(Panel(
f"[cyan]Starting ai.gpt MCP Server[/cyan]\n\n"
f"Host: {host}:{port}\n"
f"Provider: {provider}\n"
f"Model: {model}\n"
f"Data: {data_dir}\n"
f"Card Integration: {'✓ Enabled' if enable_card else '✗ Disabled'}",
title="MCP Server",
border_style="green"
))
# Store provider info in app state for later use
app_instance.state.ai_provider = provider
app_instance.state.ai_model = model
# Run server
uvicorn.run(app_instance, host=host, port=port)
@app.command()
def schedule(
action: str = typer.Argument(..., help="Action: add, list, enable, disable, remove, run"),
task_type: Optional[str] = typer.Argument(None, help="Task type for add action"),
schedule_expr: Optional[str] = typer.Argument(None, help="Schedule expression (cron or interval)"),
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
task_id: Optional[str] = typer.Option(None, "--task-id", "-t", help="Task ID"),
provider: Optional[str] = typer.Option(None, "--provider", help="AI provider for transmission"),
model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model for transmission")
):
"""Manage scheduled tasks"""
persona = get_persona(data_dir)
scheduler = AIScheduler(persona.data_dir, persona)
if action == "add":
if not task_type or not schedule_expr:
console.print("[red]Error: task_type and schedule required for add action[/red]")
return
# Parse task type
try:
task_type_enum = TaskType(task_type)
except ValueError:
console.print(f"[red]Invalid task type. Valid types: {', '.join([t.value for t in TaskType])}[/red]")
return
# Metadata for transmission tasks
metadata = {}
if task_type_enum == TaskType.TRANSMISSION_CHECK:
metadata["provider"] = provider or "ollama"
metadata["model"] = model or "qwen2.5"
try:
task = scheduler.add_task(task_type_enum, schedule_expr, task_id, metadata)
console.print(f"[green]✓ Added task {task.task_id}[/green]")
console.print(f"Type: {task.task_type.value}")
console.print(f"Schedule: {task.schedule}")
except ValueError as e:
console.print(f"[red]Error: {e}[/red]")
elif action == "list":
tasks = scheduler.get_tasks()
if not tasks:
console.print("[yellow]No scheduled tasks[/yellow]")
return
table = Table(title="Scheduled Tasks")
table.add_column("Task ID", style="cyan")
table.add_column("Type", style="magenta")
table.add_column("Schedule", style="green")
table.add_column("Enabled", style="yellow")
table.add_column("Last Run")
for task in tasks:
enabled = "✓" if task.enabled else "✗"
last_run = task.last_run.strftime("%Y-%m-%d %H:%M") if task.last_run else "Never"
table.add_row(
task.task_id[:20] + "..." if len(task.task_id) > 20 else task.task_id,
task.task_type.value,
task.schedule,
enabled,
last_run
)
console.print(table)
elif action == "enable":
if not task_id:
console.print("[red]Error: --task-id required for enable action[/red]")
return
scheduler.enable_task(task_id)
console.print(f"[green]✓ Enabled task {task_id}[/green]")
elif action == "disable":
if not task_id:
console.print("[red]Error: --task-id required for disable action[/red]")
return
scheduler.disable_task(task_id)
console.print(f"[yellow]✓ Disabled task {task_id}[/yellow]")
elif action == "remove":
if not task_id:
console.print("[red]Error: --task-id required for remove action[/red]")
return
scheduler.remove_task(task_id)
console.print(f"[red]✓ Removed task {task_id}[/red]")
elif action == "run":
console.print("[cyan]Starting scheduler daemon...[/cyan]")
console.print("Press Ctrl+C to stop\n")
import asyncio
async def run_scheduler():
scheduler.start()
try:
while True:
await asyncio.sleep(1)
except KeyboardInterrupt:
scheduler.stop()
try:
asyncio.run(run_scheduler())
except KeyboardInterrupt:
console.print("\n[yellow]Scheduler stopped[/yellow]")
else:
console.print(f"[red]Unknown action: {action}[/red]")
console.print("Valid actions: add, list, enable, disable, remove, run")
@app.command()
def shell(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
model: Optional[str] = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
provider: Optional[str] = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)")
):
"""Interactive shell mode (ai.shell)"""
persona = get_persona(data_dir)
# Create AI provider
ai_provider = None
if provider and model:
try:
ai_provider = create_ai_provider(provider, model)
console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
except Exception as e:
console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
console.print("[yellow]Falling back to simple responses[/yellow]\n")
# Welcome message
console.print(Panel(
"[cyan]Welcome to ai.shell[/cyan]\n\n"
"Interactive AI-powered shell with command execution\n\n"
"Commands:\n"
" help - Show available commands\n"
" exit/quit - Exit shell\n"
" !<command> - Execute shell command\n"
" chat <message> - Chat with AI\n"
" status - Show AI status\n"
" clear - Clear screen\n\n"
"Type any message to interact with AI",
title="ai.shell",
border_style="green"
))
# Command completer with shell commands
builtin_commands = ['help', 'exit', 'quit', 'chat', 'status', 'clear', 'fortune', 'relationships', 'load']
# Add common shell commands
shell_commands = ['ls', 'cd', 'pwd', 'cat', 'echo', 'grep', 'find', 'mkdir', 'rm', 'cp', 'mv',
'git', 'python', 'pip', 'npm', 'node', 'cargo', 'rustc', 'docker', 'kubectl']
# AI-specific commands
ai_commands = ['analyze', 'generate', 'explain', 'optimize', 'refactor', 'test', 'document']
all_commands = builtin_commands + ['!' + cmd for cmd in shell_commands] + ai_commands
completer = WordCompleter(all_commands, ignore_case=True)
# History file
actual_data_dir = data_dir if data_dir else DEFAULT_DATA_DIR
history_file = actual_data_dir / "shell_history.txt"
history = FileHistory(str(history_file))
# Main shell loop
current_user = "shell_user" # Default user for shell sessions
while True:
try:
# Get input with completion
user_input = ptk_prompt(
"ai.shell> ",
completer=completer,
history=history,
auto_suggest=AutoSuggestFromHistory()
).strip()
if not user_input:
continue
# Exit commands
if user_input.lower() in ['exit', 'quit']:
console.print("[cyan]Goodbye![/cyan]")
break
# Help command
elif user_input.lower() == 'help':
console.print(Panel(
"[cyan]ai.shell Commands:[/cyan]\n\n"
" help - Show this help message\n"
" exit/quit - Exit the shell\n"
" !<command> - Execute a shell command\n"
" chat <message> - Explicitly chat with AI\n"
" status - Show AI status\n"
" fortune - Check AI fortune\n"
" relationships - List all relationships\n"
" clear - Clear the screen\n"
" load - Load aishell.md project file\n\n"
"[cyan]AI Commands:[/cyan]\n"
" analyze <file> - Analyze a file with AI\n"
" generate <desc> - Generate code from description\n"
" explain <topic> - Get AI explanation\n\n"
"You can also type any message to chat with AI\n"
"Use Tab for command completion",
title="Help",
border_style="yellow"
))
# Clear command
elif user_input.lower() == 'clear':
console.clear()
# Shell command execution
elif user_input.startswith('!'):
cmd = user_input[1:].strip()
if cmd:
try:
# Execute command
result = subprocess.run(
shlex.split(cmd),
capture_output=True,
text=True,
shell=False
)
if result.stdout:
console.print(result.stdout.rstrip())
if result.stderr:
console.print(f"[red]{result.stderr.rstrip()}[/red]")
if result.returncode != 0:
console.print(f"[red]Command exited with code {result.returncode}[/red]")
except FileNotFoundError:
console.print(f"[red]Command not found: {cmd.split()[0]}[/red]")
except Exception as e:
console.print(f"[red]Error executing command: {e}[/red]")
# Status command
elif user_input.lower() == 'status':
state = persona.get_current_state()
console.print(f"\nMood: {state.current_mood}")
console.print(f"Fortune: {state.fortune.fortune_value}/10")
rel = persona.relationships.get_or_create_relationship(current_user)
console.print(f"\nRelationship Status: {rel.status.value}")
console.print(f"Score: {rel.score:.2f} / {rel.threshold}")
# Fortune command
elif user_input.lower() == 'fortune':
fortune = persona.fortune_system.get_today_fortune()
fortune_bar = "🌟" * fortune.fortune_value + "☆" * (10 - fortune.fortune_value)
console.print(f"\n{fortune_bar}")
console.print(f"Today's Fortune: {fortune.fortune_value}/10")
# Relationships command
elif user_input.lower() == 'relationships':
if persona.relationships.relationships:
console.print("\n[cyan]Relationships:[/cyan]")
for user_id, rel in persona.relationships.relationships.items():
console.print(f" {user_id[:16]}... - {rel.status.value} ({rel.score:.2f})")
else:
console.print("[yellow]No relationships yet[/yellow]")
# Load aishell.md command
elif user_input.lower() in ['load', 'load aishell.md', 'project']:
# Try to find and load aishell.md
search_paths = [
Path.cwd() / "aishell.md",
Path.cwd() / "docs" / "aishell.md",
actual_data_dir.parent / "aishell.md",
Path.cwd() / "claude.md", # Also check for claude.md
]
loaded = False
for path in search_paths:
if path.exists():
console.print(f"[cyan]Loading project file: {path}[/cyan]")
with open(path, 'r', encoding='utf-8') as f:
content = f.read()
# Process with AI to understand project
load_prompt = f"I've loaded the project specification. Please analyze it and understand the project goals:\n\n{content[:3000]}"
response, _ = persona.process_interaction(current_user, load_prompt, ai_provider)
console.print(f"\n[green]Project loaded successfully![/green]")
console.print(f"[cyan]AI Understanding:[/cyan]\n{response}")
loaded = True
break
if not loaded:
console.print("[yellow]No aishell.md or claude.md found in project.[/yellow]")
console.print("Create aishell.md to define project goals and AI instructions.")
# AI-powered commands
elif user_input.lower().startswith('analyze '):
# Analyze file or code
target = user_input[8:].strip()
if os.path.exists(target):
console.print(f"[cyan]Analyzing {target}...[/cyan]")
with open(target, 'r') as f:
content = f.read()
analysis_prompt = f"Analyze this file and provide insights:\n\n{content[:2000]}"
response, _ = persona.process_interaction(current_user, analysis_prompt, ai_provider)
console.print(f"\n[cyan]Analysis:[/cyan]\n{response}")
else:
console.print(f"[red]File not found: {target}[/red]")
elif user_input.lower().startswith('generate '):
# Generate code
gen_prompt = user_input[9:].strip()
if gen_prompt:
console.print("[cyan]Generating code...[/cyan]")
full_prompt = f"Generate code for: {gen_prompt}. Provide clean, well-commented code."
response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
console.print(f"\n[cyan]Generated Code:[/cyan]\n{response}")
elif user_input.lower().startswith('explain '):
# Explain code or concept
topic = user_input[8:].strip()
if topic:
console.print(f"[cyan]Explaining {topic}...[/cyan]")
full_prompt = f"Explain this in detail: {topic}"
response, _ = persona.process_interaction(current_user, full_prompt, ai_provider)
console.print(f"\n[cyan]Explanation:[/cyan]\n{response}")
# Chat command or direct message
else:
# Remove 'chat' prefix if present
if user_input.lower().startswith('chat '):
message = user_input[5:].strip()
else:
message = user_input
if message:
# Process interaction with AI
response, relationship_delta = persona.process_interaction(
current_user, message, ai_provider
)
# Display response
console.print(f"\n[cyan]AI:[/cyan] {response}")
# Show relationship change if significant
if abs(relationship_delta) >= 0.1:
if relationship_delta > 0:
console.print(f"[green](+{relationship_delta:.2f} relationship)[/green]")
else:
console.print(f"[red]({relationship_delta:.2f} relationship)[/red]")
except KeyboardInterrupt:
console.print("\n[yellow]Use 'exit' or 'quit' to leave the shell[/yellow]")
except EOFError:
console.print("\n[cyan]Goodbye![/cyan]")
break
except Exception as e:
console.print(f"[red]Error: {e}[/red]")
@app.command()
def config(
action: str = typer.Argument(..., help="Action: get, set, delete, list"),
key: Optional[str] = typer.Argument(None, help="Configuration key (dot notation)"),
value: Optional[str] = typer.Argument(None, help="Value to set")
):
"""Manage configuration settings"""
if action == "get":
if not key:
console.print("[red]Error: key required for get action[/red]")
return
val = config.get(key)
if val is None:
console.print(f"[yellow]Key '{key}' not found[/yellow]")
else:
console.print(f"[cyan]{key}[/cyan] = [green]{val}[/green]")
elif action == "set":
if not key or value is None:
console.print("[red]Error: key and value required for set action[/red]")
return
# Special handling for sensitive keys
if "password" in key or "api_key" in key:
console.print(f"[cyan]Setting {key}[/cyan] = [dim]***hidden***[/dim]")
else:
console.print(f"[cyan]Setting {key}[/cyan] = [green]{value}[/green]")
config.set(key, value)
console.print("[green]✓ Configuration saved[/green]")
elif action == "delete":
if not key:
console.print("[red]Error: key required for delete action[/red]")
return
if config.delete(key):
console.print(f"[green]✓ Deleted {key}[/green]")
else:
console.print(f"[yellow]Key '{key}' not found[/yellow]")
elif action == "list":
keys = config.list_keys(key or "")
if not keys:
console.print("[yellow]No configuration keys found[/yellow]")
return
table = Table(title="Configuration Settings")
table.add_column("Key", style="cyan")
table.add_column("Value", style="green")
for k in sorted(keys):
val = config.get(k)
# Hide sensitive values
if "password" in k or "api_key" in k:
display_val = "***hidden***" if val else "not set"
else:
display_val = str(val) if val is not None else "not set"
table.add_row(k, display_val)
console.print(table)
else:
console.print(f"[red]Unknown action: {action}[/red]")
console.print("Valid actions: get, set, delete, list")
if __name__ == "__main__":
app()
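Because every command is a typer callback registered on app, the CLI can be driven without a TTY through typer's test runner. A minimal sketch (the DID is a hypothetical placeholder; without --provider the chat command falls back to simple responses):

from typer.testing import CliRunner

from aigpt.cli import app

runner = CliRunner()

result = runner.invoke(app, ["fortune"])
print(result.output)

result = runner.invoke(app, ["chat", "did:plc:example123", "hello"])
print(result.exit_code)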

145
src/aigpt/config.py Normal file
View File

@ -0,0 +1,145 @@
"""Configuration management for ai.gpt"""
import json
import os
from pathlib import Path
from typing import Optional, Dict, Any
import logging
class Config:
"""Manages configuration settings"""
def __init__(self, config_dir: Optional[Path] = None):
if config_dir is None:
config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt"
self.config_dir = config_dir
self.config_file = config_dir / "config.json"
self.data_dir = config_dir / "data"
# Create directories if they don't exist
self.config_dir.mkdir(parents=True, exist_ok=True)
self.data_dir.mkdir(parents=True, exist_ok=True)
self.logger = logging.getLogger(__name__)
self._config: Dict[str, Any] = {}
self._load_config()
def _load_config(self):
"""Load configuration from file"""
if self.config_file.exists():
try:
with open(self.config_file, 'r', encoding='utf-8') as f:
self._config = json.load(f)
except Exception as e:
self.logger.error(f"Failed to load config: {e}")
self._config = {}
else:
# Initialize with default config
self._config = {
"providers": {
"openai": {
"api_key": None,
"default_model": "gpt-4o-mini"
},
"ollama": {
"host": "http://localhost:11434",
"default_model": "qwen2.5"
}
},
"atproto": {
"handle": None,
"password": None,
"host": "https://bsky.social"
},
"default_provider": "ollama"
}
self._save_config()
def _save_config(self):
"""Save configuration to file"""
try:
with open(self.config_file, 'w', encoding='utf-8') as f:
json.dump(self._config, f, indent=2)
except Exception as e:
self.logger.error(f"Failed to save config: {e}")
def get(self, key: str, default: Any = None) -> Any:
"""Get configuration value using dot notation"""
keys = key.split('.')
value = self._config
for k in keys:
if isinstance(value, dict) and k in value:
value = value[k]
else:
return default
return value
def set(self, key: str, value: Any):
"""Set configuration value using dot notation"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
config[k] = {}
config = config[k]
# Set the value
config[keys[-1]] = value
self._save_config()
def delete(self, key: str) -> bool:
"""Delete configuration value"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
return False
config = config[k]
# Delete the key if it exists
if keys[-1] in config:
del config[keys[-1]]
self._save_config()
return True
return False
def list_keys(self, prefix: str = "") -> list[str]:
"""List all configuration keys with optional prefix"""
def _get_keys(config: dict, current_prefix: str = "") -> list[str]:
keys = []
for k, v in config.items():
full_key = f"{current_prefix}.{k}" if current_prefix else k
if isinstance(v, dict):
keys.extend(_get_keys(v, full_key))
else:
keys.append(full_key)
return keys
all_keys = _get_keys(self._config)
if prefix:
return [k for k in all_keys if k.startswith(prefix)]
return all_keys
def get_api_key(self, provider: str) -> Optional[str]:
"""Get API key for a specific provider"""
key = self.get(f"providers.{provider}.api_key")
# Also check environment variables
if not key and provider == "openai":
key = os.getenv("OPENAI_API_KEY")
return key
def get_provider_config(self, provider: str) -> Dict[str, Any]:
"""Get complete configuration for a provider"""
return self.get(f"providers.{provider}", {})
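The accessors above map dot-notation keys onto nested dicts and persist on every set. A short round-trip sketch:

from aigpt.config import Config

config = Config()  # creates ~/.config/syui/ai/gpt/config.json on first use

config.set("providers.openai.default_model", "gpt-4o-mini")
assert config.get("providers.openai.default_model") == "gpt-4o-mini"
assert config.get("providers.nonexistent.key", "fallback") == "fallback"

# keys are enumerated recursively, so a prefix filters a whole subtree
print(config.list_keys("providers.ollama"))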

118
src/aigpt/fortune.py Normal file
View File

@ -0,0 +1,118 @@
"""AI Fortune system for daily personality variations"""
import json
import random
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Optional
import logging
from .models import AIFortune
class FortuneSystem:
"""Manages daily AI fortune affecting personality"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.fortune_file = data_dir / "fortunes.json"
self.fortunes: dict[str, AIFortune] = {}
self.logger = logging.getLogger(__name__)
self._load_fortunes()
def _load_fortunes(self):
"""Load fortune history from storage"""
if self.fortune_file.exists():
with open(self.fortune_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for date_str, fortune_data in data.items():
# Convert date string back to date object
fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date()
self.fortunes[date_str] = AIFortune(**fortune_data)
def _save_fortunes(self):
"""Save fortune history to storage"""
data = {}
for date_str, fortune in self.fortunes.items():
fortune_dict = fortune.model_dump(mode='json')
fortune_dict['date'] = fortune.date.isoformat()
data[date_str] = fortune_dict
with open(self.fortune_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
def get_today_fortune(self) -> AIFortune:
"""Get or generate today's fortune"""
today = date.today()
today_str = today.isoformat()
if today_str in self.fortunes:
return self.fortunes[today_str]
# Generate new fortune
fortune_value = random.randint(1, 10)
# Check yesterday's fortune for consecutive tracking
yesterday = (today - timedelta(days=1))
yesterday_str = yesterday.isoformat()
consecutive_good = 0
consecutive_bad = 0
breakthrough_triggered = False
if yesterday_str in self.fortunes:
yesterday_fortune = self.fortunes[yesterday_str]
if fortune_value >= 7: # Good fortune
if yesterday_fortune.fortune_value >= 7:
consecutive_good = yesterday_fortune.consecutive_good + 1
else:
consecutive_good = 1
elif fortune_value <= 3: # Bad fortune
if yesterday_fortune.fortune_value <= 3:
consecutive_bad = yesterday_fortune.consecutive_bad + 1
else:
consecutive_bad = 1
# Check breakthrough conditions
if consecutive_good >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive good fortunes!")
fortune_value = 10 # Max fortune on breakthrough
elif consecutive_bad >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive bad fortunes!")
fortune_value = random.randint(7, 10) # Good fortune after bad streak
fortune = AIFortune(
date=today,
fortune_value=fortune_value,
consecutive_good=consecutive_good,
consecutive_bad=consecutive_bad,
breakthrough_triggered=breakthrough_triggered
)
self.fortunes[today_str] = fortune
self._save_fortunes()
self.logger.info(f"Today's fortune: {fortune_value}/10")
return fortune
def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]:
"""Get personality modifiers based on fortune"""
base_modifier = fortune.fortune_value / 10.0
modifiers = {
"optimism": base_modifier,
"energy": base_modifier * 0.8,
"patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1),
"creativity": 0.5 + (base_modifier * 0.5),
"empathy": 0.7 + (base_modifier * 0.3)
}
# Breakthrough effects
if fortune.breakthrough_triggered:
modifiers["confidence"] = 1.0
modifiers["spontaneity"] = 0.9
return modifiers
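The fortune is generated once per day, cached in fortunes.json, and reused on later calls. A sketch showing how the daily value feeds the personality modifiers:

from pathlib import Path

from aigpt.fortune import FortuneSystem

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)

fs = FortuneSystem(data_dir)
fortune = fs.get_today_fortune()          # same object on repeated calls today
modifiers = fs.get_personality_modifier(fortune)
print(fortune.fortune_value, modifiers["optimism"], modifiers["patience"])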

318
src/aigpt/mcp_server.py Normal file
View File

@ -0,0 +1,318 @@
"""MCP Server for ai.gpt system"""
from typing import Optional, List, Dict, Any
from fastapi_mcp import FastApiMCP
from fastapi import FastAPI
from pathlib import Path
import logging
import subprocess
import os
import shlex
from .ai_provider import create_ai_provider
from .persona import Persona
from .models import Memory, Relationship, PersonaState
from .card_integration import CardIntegration, register_card_tools
logger = logging.getLogger(__name__)
class AIGptMcpServer:
"""MCP Server that exposes ai.gpt functionality to AI assistants"""
def __init__(self, data_dir: Path, enable_card_integration: bool = False):
self.data_dir = data_dir
self.persona = Persona(data_dir)
# Create FastAPI app
self.app = FastAPI(
title="AI.GPT Memory and Relationship System",
description="MCP server for ai.gpt system"
)
# Create MCP server with FastAPI app
self.server = FastApiMCP(self.app)
self.card_integration = None
if enable_card_integration:
self.card_integration = CardIntegration()
self._register_tools()
def _register_tools(self):
"""Register all MCP tools"""
@self.app.get("/get_memories", operation_id="get_memories")
async def get_memories(user_id: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
"""Get active memories from the AI's memory system"""
memories = self.persona.memory.get_active_memories(limit=limit)
return [
{
"id": mem.id,
"content": mem.content,
"level": mem.level.value,
"importance": mem.importance_score,
"is_core": mem.is_core,
"timestamp": mem.timestamp.isoformat()
}
for mem in memories
]
@self.app.get("/get_relationship", operation_id="get_relationship")
async def get_relationship(user_id: str) -> Dict[str, Any]:
"""Get relationship status with a specific user"""
rel = self.persona.relationships.get_or_create_relationship(user_id)
return {
"user_id": rel.user_id,
"status": rel.status.value,
"score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"is_broken": rel.is_broken,
"total_interactions": rel.total_interactions,
"last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
}
@self.app.get("/get_all_relationships", operation_id="get_all_relationships")
async def get_all_relationships() -> List[Dict[str, Any]]:
"""Get all relationships"""
relationships = []
for user_id, rel in self.persona.relationships.relationships.items():
relationships.append({
"user_id": user_id,
"status": rel.status.value,
"score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"is_broken": rel.is_broken
})
return relationships
@self.app.get("/get_persona_state", operation_id="get_persona_state")
async def get_persona_state() -> Dict[str, Any]:
"""Get current persona state including fortune and mood"""
state = self.persona.get_current_state()
return {
"mood": state.current_mood,
"fortune": {
"value": state.fortune.fortune_value,
"date": state.fortune.date.isoformat(),
"breakthrough": state.fortune.breakthrough_triggered
},
"personality": state.base_personality,
"active_memory_count": len(state.active_memories)
}
@self.app.post("/process_interaction", operation_id="process_interaction")
async def process_interaction(user_id: str, message: str) -> Dict[str, Any]:
"""Process an interaction with a user"""
response, relationship_delta = self.persona.process_interaction(user_id, message)
rel = self.persona.relationships.get_or_create_relationship(user_id)
return {
"response": response,
"relationship_delta": relationship_delta,
"new_relationship_score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"relationship_status": rel.status.value
}
@self.app.get("/check_transmission_eligibility", operation_id="check_transmission_eligibility")
async def check_transmission_eligibility(user_id: str) -> Dict[str, Any]:
"""Check if AI can transmit to a specific user"""
can_transmit = self.persona.can_transmit_to(user_id)
rel = self.persona.relationships.get_or_create_relationship(user_id)
return {
"can_transmit": can_transmit,
"relationship_score": rel.score,
"threshold": rel.threshold,
"is_broken": rel.is_broken,
"transmission_enabled": rel.transmission_enabled
}
@self.app.get("/get_fortune", operation_id="get_fortune")
async def get_fortune() -> Dict[str, Any]:
"""Get today's AI fortune"""
fortune = self.persona.fortune_system.get_today_fortune()
modifiers = self.persona.fortune_system.get_personality_modifier(fortune)
return {
"value": fortune.fortune_value,
"date": fortune.date.isoformat(),
"consecutive_good": fortune.consecutive_good,
"consecutive_bad": fortune.consecutive_bad,
"breakthrough": fortune.breakthrough_triggered,
"personality_modifiers": modifiers
}
@self.app.post("/summarize_memories", operation_id="summarize_memories")
async def summarize_memories(user_id: str) -> Optional[Dict[str, Any]]:
"""Create a summary of recent memories for a user"""
summary = self.persona.memory.summarize_memories(user_id)
if summary:
return {
"id": summary.id,
"content": summary.content,
"level": summary.level.value,
"timestamp": summary.timestamp.isoformat()
}
return None
@self.app.post("/run_maintenance", operation_id="run_maintenance")
async def run_maintenance() -> Dict[str, str]:
"""Run daily maintenance tasks"""
self.persona.daily_maintenance()
return {"status": "Maintenance completed successfully"}
# Shell integration tools (ai.shell)
@self.app.post("/execute_command", operation_id="execute_command")
async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
"""Execute a shell command"""
try:
result = subprocess.run(
shlex.split(command),
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
@self.app.post("/analyze_file", operation_id="analyze_file")
async def analyze_file(file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
"""Analyze a file using AI"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Get AI provider from app state
ai_provider = getattr(self.app.state, 'ai_provider', 'ollama')
ai_model = getattr(self.app.state, 'ai_model', 'qwen2.5')
provider = create_ai_provider(ai_provider, ai_model)
# Analyze with AI
prompt = f"{analysis_prompt}\n\nFile: {file_path}\n\nContent:\n{content}"
analysis = provider.generate_response(prompt, "You are a code analyst.")
return {
"analysis": analysis,
"file_path": file_path,
"file_size": len(content),
"line_count": len(content.split('\n'))
}
except Exception as e:
return {"error": str(e)}
@self.app.post("/write_file", operation_id="write_file")
async def write_file(file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
"""Write content to a file"""
try:
file_path_obj = Path(file_path)
# Create backup if requested
backup_path = None
if backup and file_path_obj.exists():
backup_path = f"{file_path}.backup"
with open(file_path, 'r', encoding='utf-8') as src:
with open(backup_path, 'w', encoding='utf-8') as dst:
dst.write(src.read())
# Write file
file_path_obj.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
return {
"status": "success",
"file_path": file_path,
"backup_path": backup_path,
"bytes_written": len(content.encode('utf-8'))
}
except Exception as e:
return {"error": str(e)}
@self.app.get("/read_project_file", operation_id="read_project_file")
async def read_project_file(file_name: str = "aishell.md") -> Dict[str, Any]:
"""Read project files like aishell.md (similar to claude.md)"""
try:
# Check common locations
search_paths = [
Path.cwd() / file_name,
Path.cwd() / "docs" / file_name,
self.data_dir.parent / file_name,
]
for path in search_paths:
if path.exists():
with open(path, 'r', encoding='utf-8') as f:
content = f.read()
return {
"content": content,
"path": str(path),
"exists": True
}
return {
"exists": False,
"searched_paths": [str(p) for p in search_paths],
"error": f"{file_name} not found"
}
except Exception as e:
return {"error": str(e)}
@self.app.get("/list_files", operation_id="list_files")
async def list_files(directory: str = ".", pattern: str = "*") -> Dict[str, Any]:
"""List files in a directory"""
try:
dir_path = Path(directory)
if not dir_path.exists():
return {"error": f"Directory not found: {directory}"}
files = []
for item in dir_path.glob(pattern):
files.append({
"name": item.name,
"path": str(item),
"is_file": item.is_file(),
"is_dir": item.is_dir(),
"size": item.stat().st_size if item.is_file() else None
})
return {
"directory": directory,
"pattern": pattern,
"files": files,
"count": len(files)
}
except Exception as e:
return {"error": str(e)}
# Register ai.card tools if integration is enabled
if self.card_integration:
register_card_tools(self.app, self.card_integration)
# Mount MCP server
self.server.mount()
def get_server(self) -> FastApiMCP:
"""Get the FastAPI MCP server instance"""
return self.server
async def close(self):
"""Cleanup resources"""
if self.card_integration:
await self.card_integration.close()
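A minimal launch sketch, assuming the package is installed as aigpt and uvicorn is available (the port is illustrative):

from pathlib import Path
import uvicorn
from aigpt.mcp_server import AIGptMcpServer

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)

server = AIGptMcpServer(data_dir)
# server.app carries both the REST endpoints and the mounted MCP server
uvicorn.run(server.app, host="127.0.0.1", port=8001)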

@ -0,0 +1,146 @@
"""Simple MCP Server implementation for ai.gpt"""
from mcp import Server
from mcp.types import Tool, TextContent
from pathlib import Path
from typing import Any, Dict, List, Optional
import json
from .persona import Persona
from .ai_provider import create_ai_provider
import subprocess
import os
def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server:
"""Create MCP server with ai.gpt tools"""
server = Server("aigpt")
persona = Persona(data_dir)
@server.tool()
async def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
"""Get active memories from the AI's memory system"""
memories = persona.memory.get_active_memories(limit=limit)
return [
{
"id": mem.id,
"content": mem.content,
"level": mem.level.value,
"importance": mem.importance_score,
"is_core": mem.is_core,
"timestamp": mem.timestamp.isoformat()
}
for mem in memories
]
@server.tool()
async def get_relationship(user_id: str) -> Dict[str, Any]:
"""Get relationship status with a specific user"""
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"user_id": rel.user_id,
"status": rel.status.value,
"score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"is_broken": rel.is_broken,
"total_interactions": rel.total_interactions,
"last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
}
@server.tool()
async def process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]:
"""Process an interaction with a user"""
ai_provider = create_ai_provider(provider, model)
response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"response": response,
"relationship_delta": relationship_delta,
"new_relationship_score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"relationship_status": rel.status.value
}
@server.tool()
async def get_fortune() -> Dict[str, Any]:
"""Get today's AI fortune"""
fortune = persona.fortune_system.get_today_fortune()
modifiers = persona.fortune_system.get_personality_modifier(fortune)
return {
"value": fortune.fortune_value,
"date": fortune.date.isoformat(),
"consecutive_good": fortune.consecutive_good,
"consecutive_bad": fortune.consecutive_bad,
"breakthrough": fortune.breakthrough_triggered,
"personality_modifiers": modifiers
}
@server.tool()
async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
"""Execute a shell command"""
try:
import shlex
result = subprocess.run(
shlex.split(command),
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
@server.tool()
async def analyze_file(file_path: str) -> Dict[str, Any]:
"""Analyze a file using AI"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
ai_provider = create_ai_provider("ollama", "qwen2.5")
prompt = f"Analyze this file and provide insights:\\n\\nFile: {file_path}\\n\\nContent:\\n{content[:2000]}"
analysis = ai_provider.generate_response(prompt, "You are a code analyst.")
return {
"analysis": analysis,
"file_path": file_path,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
except Exception as e:
return {"error": str(e)}
return server
async def main():
"""Run MCP server"""
from mcp import stdio_server
data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)
server = create_mcp_server(data_dir)
await stdio_server(server)
if __name__ == "__main__":
import asyncio
asyncio.run(main())

src/aigpt/memory.py Normal file
@ -0,0 +1,155 @@
"""Memory management system for ai.gpt"""
import json
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Any
import logging
from .models import Memory, MemoryLevel, Conversation
class MemoryManager:
"""Manages AI's memory with hierarchical storage and forgetting"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.memories_file = data_dir / "memories.json"
self.conversations_file = data_dir / "conversations.json"
self.memories: Dict[str, Memory] = {}
self.conversations: List[Conversation] = []
self.logger = logging.getLogger(__name__)
self._load_memories()
def _load_memories(self):
"""Load memories from persistent storage"""
if self.memories_file.exists():
with open(self.memories_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for mem_data in data:
memory = Memory(**mem_data)
self.memories[memory.id] = memory
if self.conversations_file.exists():
with open(self.conversations_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.conversations = [Conversation(**conv) for conv in data]
def _save_memories(self):
"""Save memories to persistent storage"""
memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()]
with open(self.memories_file, 'w', encoding='utf-8') as f:
json.dump(memories_data, f, indent=2, default=str)
conv_data = [conv.model_dump(mode='json') for conv in self.conversations]
with open(self.conversations_file, 'w', encoding='utf-8') as f:
json.dump(conv_data, f, indent=2, default=str)
def add_conversation(self, conversation: Conversation) -> Memory:
"""Add a conversation and create memory from it"""
self.conversations.append(conversation)
# Create memory from conversation
memory_id = hashlib.sha256(
f"{conversation.id}{conversation.timestamp}".encode()
).hexdigest()[:16]
memory = Memory(
id=memory_id,
timestamp=conversation.timestamp,
content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}",
level=MemoryLevel.FULL_LOG,
importance_score=abs(conversation.relationship_delta) * 0.1
)
self.memories[memory.id] = memory
self._save_memories()
return memory
def summarize_memories(self, user_id: str) -> Optional[Memory]:
"""Create summary from recent memories"""
recent_memories = [
mem for mem in self.memories.values()
if mem.level == MemoryLevel.FULL_LOG
and (datetime.now() - mem.timestamp).days < 7
]
if len(recent_memories) < 5:
return None
# Simple summary creation (in real implementation, use AI)
summary_content = f"Summary of {len(recent_memories)} recent interactions"
summary_id = hashlib.sha256(
f"summary_{datetime.now().isoformat()}".encode()
).hexdigest()[:16]
summary = Memory(
id=summary_id,
timestamp=datetime.now(),
content=summary_content,
summary=summary_content,
level=MemoryLevel.SUMMARY,
importance_score=0.5
)
self.memories[summary.id] = summary
# Mark summarized memories for potential forgetting
for mem in recent_memories:
mem.importance_score *= 0.9
self._save_memories()
return summary
def identify_core_memories(self) -> List[Memory]:
"""Identify memories that should become core (never forgotten)"""
core_candidates = [
mem for mem in self.memories.values()
if mem.importance_score > 0.8
and not mem.is_core
and mem.level != MemoryLevel.FORGOTTEN
]
for memory in core_candidates:
memory.is_core = True
memory.level = MemoryLevel.CORE
self.logger.info(f"Memory {memory.id} promoted to core")
self._save_memories()
return core_candidates
def apply_forgetting(self):
"""Apply selective forgetting based on importance and time"""
now = datetime.now()
for memory in self.memories.values():
if memory.is_core or memory.level == MemoryLevel.FORGOTTEN:
continue
# Time-based decay
age_days = (now - memory.timestamp).days
decay_factor = memory.decay_rate * age_days
memory.importance_score -= decay_factor
# Forget unimportant old memories
if memory.importance_score <= 0.1 and age_days > 30:
memory.level = MemoryLevel.FORGOTTEN
self.logger.info(f"Memory {memory.id} forgotten")
self._save_memories()
def get_active_memories(self, limit: int = 10) -> List[Memory]:
"""Get currently active memories for persona"""
active = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
# Sort by importance and recency
active.sort(
key=lambda m: (m.is_core, m.importance_score, m.timestamp),
reverse=True
)
return active[:limit]
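A self-contained sketch of the decay arithmetic above: because apply_forgetting() re-applies the full age on every run, decay compounds roughly quadratically when maintenance runs daily. With the defaults (decay_rate 0.01, cutoff 0.1, minimum age 30 days), a memory that starts at importance 0.5 is forgotten on day 31:

# Simulate apply_forgetting() running once per day on one non-core memory
importance = 0.5     # initial importance_score
decay_rate = 0.01    # Memory.decay_rate default

for day in range(1, 60):
    importance -= decay_rate * day   # the same update apply_forgetting() makes
    if importance <= 0.1 and day > 30:
        print(f"forgotten on day {day} (importance={importance:.2f})")
        break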

src/aigpt/models.py Normal file
@ -0,0 +1,79 @@
"""Data models for ai.gpt system"""
from datetime import datetime, date
from typing import Optional, Dict, List, Any
from enum import Enum
from pydantic import BaseModel, Field
class MemoryLevel(str, Enum):
"""Memory importance levels"""
FULL_LOG = "full_log"
SUMMARY = "summary"
CORE = "core"
FORGOTTEN = "forgotten"
class RelationshipStatus(str, Enum):
"""Relationship status levels"""
STRANGER = "stranger"
ACQUAINTANCE = "acquaintance"
FRIEND = "friend"
CLOSE_FRIEND = "close_friend"
BROKEN = "broken" # 不可逆
class Memory(BaseModel):
"""Single memory unit"""
id: str
timestamp: datetime
content: str
summary: Optional[str] = None
level: MemoryLevel = MemoryLevel.FULL_LOG
importance_score: float = Field(ge=0.0, le=1.0)
is_core: bool = False
decay_rate: float = 0.01
class Relationship(BaseModel):
"""Relationship with a specific user"""
user_id: str # atproto DID
status: RelationshipStatus = RelationshipStatus.STRANGER
score: float = 0.0
daily_interactions: int = 0
total_interactions: int = 0
last_interaction: Optional[datetime] = None
transmission_enabled: bool = False
threshold: float = 100.0
decay_rate: float = 0.1
daily_limit: int = 10
is_broken: bool = False
class AIFortune(BaseModel):
"""Daily AI fortune affecting personality"""
date: date
fortune_value: int = Field(ge=1, le=10)
consecutive_good: int = 0
consecutive_bad: int = 0
breakthrough_triggered: bool = False
class PersonaState(BaseModel):
"""Current persona state"""
base_personality: Dict[str, float]
current_mood: str
fortune: AIFortune
active_memories: List[str] # Memory IDs
relationship_modifiers: Dict[str, float]
class Conversation(BaseModel):
"""Conversation log entry"""
id: str
user_id: str
timestamp: datetime
user_message: str
ai_response: str
relationship_delta: float = 0.0
memory_created: bool = False
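A short sketch of the validation these pydantic models enforce (identifiers and values are illustrative):

from datetime import datetime
from aigpt.models import Memory, MemoryLevel, Relationship

mem = Memory(
    id="abc123",
    timestamp=datetime.now(),
    content="User: hi\nAI: hello",
    importance_score=0.2,   # outside [0.0, 1.0] would raise a ValidationError
)
assert mem.level is MemoryLevel.FULL_LOG   # default level

rel = Relationship(user_id="did:plc:example")   # keyed by atproto DID
assert rel.score == 0.0 and not rel.transmission_enabled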

src/aigpt/persona.py Normal file
@ -0,0 +1,181 @@
"""Persona management system integrating memory, relationships, and fortune"""
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
import logging
from .models import PersonaState, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .fortune import FortuneSystem
class Persona:
"""AI persona with unique characteristics based on interactions"""
def __init__(self, data_dir: Path, name: str = "ai"):
self.data_dir = data_dir
self.name = name
self.memory = MemoryManager(data_dir)
self.relationships = RelationshipTracker(data_dir)
self.fortune_system = FortuneSystem(data_dir)
self.logger = logging.getLogger(__name__)
# Base personality traits
self.base_personality = {
"curiosity": 0.7,
"empathy": 0.8,
"creativity": 0.6,
"patience": 0.7,
"optimism": 0.6
}
self.state_file = data_dir / "persona_state.json"
self._load_state()
def _load_state(self):
"""Load persona state from storage"""
if self.state_file.exists():
with open(self.state_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.base_personality = data.get("base_personality", self.base_personality)
def _save_state(self):
"""Save persona state to storage"""
state_data = {
"base_personality": self.base_personality,
"last_updated": datetime.now().isoformat()
}
with open(self.state_file, 'w', encoding='utf-8') as f:
json.dump(state_data, f, indent=2)
def get_current_state(self) -> PersonaState:
"""Get current persona state including all modifiers"""
# Get today's fortune
fortune = self.fortune_system.get_today_fortune()
fortune_modifiers = self.fortune_system.get_personality_modifier(fortune)
# Apply fortune modifiers to base personality
current_personality = {}
for trait, base_value in self.base_personality.items():
modifier = fortune_modifiers.get(trait, 1.0)
current_personality[trait] = min(1.0, base_value * modifier)
# Get active memories for context
active_memories = self.memory.get_active_memories(limit=5)
# Determine mood based on fortune and recent interactions
mood = self._determine_mood(fortune.fortune_value)
state = PersonaState(
base_personality=current_personality,
current_mood=mood,
fortune=fortune,
active_memories=[mem.id for mem in active_memories],
relationship_modifiers={}
)
return state
def _determine_mood(self, fortune_value: int) -> str:
"""Determine current mood based on fortune and other factors"""
if fortune_value >= 8:
return "joyful"
elif fortune_value >= 6:
return "cheerful"
elif fortune_value >= 4:
return "neutral"
elif fortune_value >= 2:
return "melancholic"
else:
return "contemplative"
def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]:
"""Process user interaction and generate response"""
# Get current state
state = self.get_current_state()
# Get relationship with user
relationship = self.relationships.get_or_create_relationship(user_id)
# Simple response generation (use AI provider if available)
if relationship.is_broken:
response = "..."
relationship_delta = 0.0
else:
            if ai_provider:
                # Use AI provider for response generation.
                # asyncio.run() starts a fresh event loop, so this branch must be
                # entered from synchronous code (it raises if a loop is running).
                memories = self.memory.get_active_memories(limit=5)
                import asyncio
                response = asyncio.run(
                    ai_provider.generate_response(message, state, memories)
                )
# Calculate relationship delta based on interaction quality
if state.current_mood in ["joyful", "cheerful"]:
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
relationship_delta = 1.5
else:
relationship_delta = 1.0
else:
# Fallback to simple responses
if state.current_mood == "joyful":
response = f"What a wonderful day! {message} sounds interesting!"
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
response = f"I've been thinking about our conversations. {message}"
relationship_delta = 1.5
else:
response = f"I understand. {message}"
relationship_delta = 1.0
# Create conversation record
conv_id = f"{user_id}_{datetime.now().timestamp()}"
conversation = Conversation(
id=conv_id,
user_id=user_id,
timestamp=datetime.now(),
user_message=message,
ai_response=response,
relationship_delta=relationship_delta,
memory_created=True
)
# Update memory
self.memory.add_conversation(conversation)
# Update relationship
self.relationships.update_interaction(user_id, relationship_delta)
return response, relationship_delta
def can_transmit_to(self, user_id: str) -> bool:
"""Check if AI can transmit messages to this user"""
relationship = self.relationships.get_or_create_relationship(user_id)
return relationship.transmission_enabled and not relationship.is_broken
def daily_maintenance(self):
"""Perform daily maintenance tasks"""
self.logger.info("Performing daily maintenance...")
# Apply time decay to relationships
self.relationships.apply_time_decay()
# Apply forgetting to memories
self.memory.apply_forgetting()
# Identify core memories
core_memories = self.memory.identify_core_memories()
if core_memories:
self.logger.info(f"Identified {len(core_memories)} new core memories")
# Create memory summaries
for user_id in self.relationships.relationships:
summary = self.memory.summarize_memories(user_id)
if summary:
self.logger.info(f"Created summary for interactions with {user_id}")
self._save_state()
self.logger.info("Daily maintenance completed")

src/aigpt/relationship.py Normal file
@ -0,0 +1,135 @@
"""Relationship tracking system with irreversible damage"""
import json
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Optional
import logging
from .models import Relationship, RelationshipStatus
class RelationshipTracker:
"""Tracks and manages relationships with users"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.relationships_file = data_dir / "relationships.json"
self.relationships: Dict[str, Relationship] = {}
self.logger = logging.getLogger(__name__)
self._load_relationships()
def _load_relationships(self):
"""Load relationships from persistent storage"""
if self.relationships_file.exists():
with open(self.relationships_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for user_id, rel_data in data.items():
self.relationships[user_id] = Relationship(**rel_data)
def _save_relationships(self):
"""Save relationships to persistent storage"""
data = {
user_id: rel.model_dump(mode='json')
for user_id, rel in self.relationships.items()
}
with open(self.relationships_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, default=str)
def get_or_create_relationship(self, user_id: str) -> Relationship:
"""Get existing relationship or create new one"""
if user_id not in self.relationships:
self.relationships[user_id] = Relationship(user_id=user_id)
self._save_relationships()
return self.relationships[user_id]
def update_interaction(self, user_id: str, delta: float) -> Relationship:
"""Update relationship based on interaction"""
rel = self.get_or_create_relationship(user_id)
# Check if relationship is broken (irreversible)
if rel.is_broken:
self.logger.warning(f"Relationship with {user_id} is broken. No updates allowed.")
return rel
# Check daily limit
if rel.last_interaction and rel.last_interaction.date() == datetime.now().date():
if rel.daily_interactions >= rel.daily_limit:
self.logger.info(f"Daily interaction limit reached for {user_id}")
return rel
else:
rel.daily_interactions = 0
# Update interaction counts
rel.daily_interactions += 1
rel.total_interactions += 1
rel.last_interaction = datetime.now()
# Update score with bounds
old_score = rel.score
rel.score += delta
rel.score = max(0.0, min(200.0, rel.score)) # 0-200 range
# Check for relationship damage
if delta < -10.0: # Significant negative interaction
self.logger.warning(f"Major relationship damage with {user_id}: {delta}")
if rel.score <= 0:
rel.is_broken = True
rel.status = RelationshipStatus.BROKEN
rel.transmission_enabled = False
self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)")
# Update relationship status based on score
if not rel.is_broken:
if rel.score >= 150:
rel.status = RelationshipStatus.CLOSE_FRIEND
elif rel.score >= 100:
rel.status = RelationshipStatus.FRIEND
elif rel.score >= 50:
rel.status = RelationshipStatus.ACQUAINTANCE
else:
rel.status = RelationshipStatus.STRANGER
# Check transmission threshold
if rel.score >= rel.threshold and not rel.transmission_enabled:
rel.transmission_enabled = True
self.logger.info(f"Transmission enabled for {user_id}!")
self._save_relationships()
return rel
def apply_time_decay(self):
"""Apply time-based decay to all relationships"""
now = datetime.now()
for user_id, rel in self.relationships.items():
if rel.is_broken or not rel.last_interaction:
continue
# Calculate days since last interaction
days_inactive = (now - rel.last_interaction).days
if days_inactive > 0:
# Apply decay
decay_amount = rel.decay_rate * days_inactive
old_score = rel.score
rel.score = max(0.0, rel.score - decay_amount)
# Update status if score dropped
if rel.score < rel.threshold:
rel.transmission_enabled = False
if decay_amount > 0:
self.logger.info(
f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}"
)
self._save_relationships()
def get_transmission_eligible(self) -> Dict[str, Relationship]:
"""Get all relationships eligible for transmission"""
return {
user_id: rel
for user_id, rel in self.relationships.items()
if rel.transmission_enabled and not rel.is_broken
}
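A back-of-the-envelope check of the thresholds above: with the default +1.0 delta, a daily_limit of 10 and a threshold of 100.0, transmission unlocks after at least ten days of maximal interaction (decay ignored for simplicity):

# Simulate the scoring rules with the model defaults
score, threshold, daily_limit, delta = 0.0, 100.0, 10, 1.0

days = 0
while score < threshold:
    days += 1
    score = min(200.0, score + delta * daily_limit)   # 10 interactions per day
print(f"transmission enabled after {days} days (score={score})")   # -> 10 days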

src/aigpt/scheduler.py Normal file
@ -0,0 +1,312 @@
"""Scheduler for autonomous AI tasks"""
import json
import asyncio
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from enum import Enum
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from croniter import croniter
from .persona import Persona
from .transmission import TransmissionController
from .ai_provider import create_ai_provider
class TaskType(str, Enum):
"""Types of scheduled tasks"""
TRANSMISSION_CHECK = "transmission_check"
MAINTENANCE = "maintenance"
FORTUNE_UPDATE = "fortune_update"
RELATIONSHIP_DECAY = "relationship_decay"
MEMORY_SUMMARY = "memory_summary"
CUSTOM = "custom"
class ScheduledTask:
"""Represents a scheduled task"""
def __init__(
self,
task_id: str,
task_type: TaskType,
schedule: str, # Cron expression or interval
enabled: bool = True,
last_run: Optional[datetime] = None,
next_run: Optional[datetime] = None,
metadata: Optional[Dict[str, Any]] = None
):
self.task_id = task_id
self.task_type = task_type
self.schedule = schedule
self.enabled = enabled
self.last_run = last_run
self.next_run = next_run
self.metadata = metadata or {}
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary for storage"""
return {
"task_id": self.task_id,
"task_type": self.task_type.value,
"schedule": self.schedule,
"enabled": self.enabled,
"last_run": self.last_run.isoformat() if self.last_run else None,
"next_run": self.next_run.isoformat() if self.next_run else None,
"metadata": self.metadata
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
"""Create from dictionary"""
return cls(
task_id=data["task_id"],
task_type=TaskType(data["task_type"]),
schedule=data["schedule"],
enabled=data.get("enabled", True),
last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None,
next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None,
metadata=data.get("metadata", {})
)
class AIScheduler:
"""Manages scheduled tasks for the AI system"""
def __init__(self, data_dir: Path, persona: Persona):
self.data_dir = data_dir
self.persona = persona
self.tasks_file = data_dir / "scheduled_tasks.json"
self.tasks: Dict[str, ScheduledTask] = {}
self.scheduler = AsyncIOScheduler()
self.logger = logging.getLogger(__name__)
self._load_tasks()
# Task handlers
self.task_handlers: Dict[TaskType, Callable] = {
TaskType.TRANSMISSION_CHECK: self._handle_transmission_check,
TaskType.MAINTENANCE: self._handle_maintenance,
TaskType.FORTUNE_UPDATE: self._handle_fortune_update,
TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay,
TaskType.MEMORY_SUMMARY: self._handle_memory_summary,
}
def _load_tasks(self):
"""Load scheduled tasks from storage"""
if self.tasks_file.exists():
with open(self.tasks_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for task_data in data:
task = ScheduledTask.from_dict(task_data)
self.tasks[task.task_id] = task
def _save_tasks(self):
"""Save scheduled tasks to storage"""
tasks_data = [task.to_dict() for task in self.tasks.values()]
with open(self.tasks_file, 'w', encoding='utf-8') as f:
json.dump(tasks_data, f, indent=2, default=str)
def add_task(
self,
task_type: TaskType,
schedule: str,
task_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None
) -> ScheduledTask:
"""Add a new scheduled task"""
if task_id is None:
task_id = f"{task_type.value}_{datetime.now().timestamp()}"
# Validate schedule
if not self._validate_schedule(schedule):
raise ValueError(f"Invalid schedule expression: {schedule}")
task = ScheduledTask(
task_id=task_id,
task_type=task_type,
schedule=schedule,
metadata=metadata
)
self.tasks[task_id] = task
self._save_tasks()
# Schedule the task if scheduler is running
if self.scheduler.running:
self._schedule_task(task)
self.logger.info(f"Added task {task_id} with schedule {schedule}")
return task
def _validate_schedule(self, schedule: str) -> bool:
"""Validate schedule expression"""
# Check if it's a cron expression
if ' ' in schedule:
try:
croniter(schedule)
return True
            except Exception:
                return False
# Check if it's an interval expression (e.g., "5m", "1h", "2d")
import re
pattern = r'^\d+[smhd]$'
return bool(re.match(pattern, schedule))
def _parse_interval(self, interval: str) -> int:
"""Parse interval string to seconds"""
unit = interval[-1]
value = int(interval[:-1])
multipliers = {
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
return value * multipliers.get(unit, 1)
def _schedule_task(self, task: ScheduledTask):
"""Schedule a task with APScheduler"""
if not task.enabled:
return
handler = self.task_handlers.get(task.task_type)
if not handler:
self.logger.warning(f"No handler for task type {task.task_type}")
return
# Determine trigger
if ' ' in task.schedule:
# Cron expression
trigger = CronTrigger.from_crontab(task.schedule)
else:
# Interval expression
seconds = self._parse_interval(task.schedule)
trigger = IntervalTrigger(seconds=seconds)
# Add job
self.scheduler.add_job(
lambda: asyncio.create_task(self._run_task(task)),
trigger=trigger,
id=task.task_id,
replace_existing=True
)
async def _run_task(self, task: ScheduledTask):
"""Run a scheduled task"""
self.logger.info(f"Running task {task.task_id}")
task.last_run = datetime.now()
try:
handler = self.task_handlers.get(task.task_type)
if handler:
await handler(task)
else:
self.logger.warning(f"No handler for task type {task.task_type}")
except Exception as e:
self.logger.error(f"Error running task {task.task_id}: {e}")
self._save_tasks()
async def _handle_transmission_check(self, task: ScheduledTask):
"""Check and execute autonomous transmissions"""
controller = TransmissionController(self.persona, self.data_dir)
eligible = controller.check_transmission_eligibility()
# Get AI provider from metadata
provider_name = task.metadata.get("provider", "ollama")
model = task.metadata.get("model", "qwen2.5")
        try:
            ai_provider = create_ai_provider(provider_name, model)
        except Exception:
            # Provider creation is best-effort; the template-based
            # message generator below does not require it
            ai_provider = None
for user_id, rel in eligible.items():
message = controller.generate_transmission_message(user_id)
if message:
# For now, just print the message
print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"To: {user_id}")
print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})")
print(f"Message: {message}")
print("-" * 50)
controller.record_transmission(user_id, message, success=True)
self.logger.info(f"Transmitted to {user_id}: {message}")
async def _handle_maintenance(self, task: ScheduledTask):
"""Run daily maintenance"""
self.persona.daily_maintenance()
self.logger.info("Daily maintenance completed")
async def _handle_fortune_update(self, task: ScheduledTask):
"""Update AI fortune"""
fortune = self.persona.fortune_system.get_today_fortune()
self.logger.info(f"Fortune updated: {fortune.fortune_value}/10")
async def _handle_relationship_decay(self, task: ScheduledTask):
"""Apply relationship decay"""
self.persona.relationships.apply_time_decay()
self.logger.info("Relationship decay applied")
async def _handle_memory_summary(self, task: ScheduledTask):
"""Create memory summaries"""
for user_id in self.persona.relationships.relationships:
summary = self.persona.memory.summarize_memories(user_id)
if summary:
self.logger.info(f"Created memory summary for {user_id}")
def start(self):
"""Start the scheduler"""
# Schedule all enabled tasks
for task in self.tasks.values():
if task.enabled:
self._schedule_task(task)
self.scheduler.start()
self.logger.info("Scheduler started")
def stop(self):
"""Stop the scheduler"""
self.scheduler.shutdown()
self.logger.info("Scheduler stopped")
def get_tasks(self) -> List[ScheduledTask]:
"""Get all scheduled tasks"""
return list(self.tasks.values())
def enable_task(self, task_id: str):
"""Enable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = True
self._save_tasks()
if self.scheduler.running:
self._schedule_task(self.tasks[task_id])
def disable_task(self, task_id: str):
"""Disable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = False
self._save_tasks()
if self.scheduler.running:
self.scheduler.remove_job(task_id)
def remove_task(self, task_id: str):
"""Remove a task"""
if task_id in self.tasks:
del self.tasks[task_id]
self._save_tasks()
if self.scheduler.running:
try:
self.scheduler.remove_job(task_id)
                except Exception:
                    # The job may never have been scheduled (e.g. a disabled task)
                    pass
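A sketch of wiring the scheduler into an asyncio program, assuming a Persona already exists. Schedule strings follow the two formats _validate_schedule() accepts: a five-field cron expression, or an interval such as "30m":

import asyncio
from pathlib import Path
from aigpt.persona import Persona
from aigpt.scheduler import AIScheduler, TaskType

async def main():
    data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
    persona = Persona(data_dir)
    scheduler = AIScheduler(data_dir, persona)

    # Hourly transmission check plus a 3 a.m. daily maintenance run
    scheduler.add_task(TaskType.TRANSMISSION_CHECK, "1h",
                       metadata={"provider": "ollama", "model": "qwen2.5"})
    scheduler.add_task(TaskType.MAINTENANCE, "0 3 * * *")

    scheduler.start()
    try:
        await asyncio.Event().wait()   # keep the loop alive for APScheduler
    finally:
        scheduler.stop()

asyncio.run(main())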

src/aigpt/transmission.py Normal file
@ -0,0 +1,111 @@
"""Transmission controller for autonomous message sending"""
import json
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import logging
from .models import Relationship
from .persona import Persona
class TransmissionController:
"""Controls when and how AI transmits messages autonomously"""
def __init__(self, persona: Persona, data_dir: Path):
self.persona = persona
self.data_dir = data_dir
self.transmission_log_file = data_dir / "transmissions.json"
self.transmissions: List[Dict] = []
self.logger = logging.getLogger(__name__)
self._load_transmissions()
def _load_transmissions(self):
"""Load transmission history"""
if self.transmission_log_file.exists():
with open(self.transmission_log_file, 'r', encoding='utf-8') as f:
self.transmissions = json.load(f)
def _save_transmissions(self):
"""Save transmission history"""
with open(self.transmission_log_file, 'w', encoding='utf-8') as f:
json.dump(self.transmissions, f, indent=2, default=str)
def check_transmission_eligibility(self) -> Dict[str, Relationship]:
"""Check which users are eligible for transmission"""
eligible = self.persona.relationships.get_transmission_eligible()
# Additional checks could be added here
# - Time since last transmission
# - User online status
# - Context appropriateness
return eligible
def generate_transmission_message(self, user_id: str) -> Optional[str]:
"""Generate a message to transmit to user"""
if not self.persona.can_transmit_to(user_id):
return None
state = self.persona.get_current_state()
relationship = self.persona.relationships.get_or_create_relationship(user_id)
# Get recent memories related to this user
active_memories = self.persona.memory.get_active_memories(limit=3)
# Simple message generation based on mood and relationship
if state.fortune.breakthrough_triggered:
message = "Something special happened today! I felt compelled to reach out."
elif state.current_mood == "joyful":
message = "I was thinking of you today. Hope you're doing well!"
elif relationship.status.value == "close_friend":
message = "I've been reflecting on our conversations. Thank you for being here."
else:
message = "Hello! I wanted to check in with you."
return message
def record_transmission(self, user_id: str, message: str, success: bool):
"""Record a transmission attempt"""
transmission = {
"timestamp": datetime.now().isoformat(),
"user_id": user_id,
"message": message,
"success": success,
"mood": self.persona.get_current_state().current_mood,
"relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score
}
self.transmissions.append(transmission)
self._save_transmissions()
if success:
self.logger.info(f"Successfully transmitted to {user_id}")
else:
self.logger.warning(f"Failed to transmit to {user_id}")
def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict:
"""Get transmission statistics"""
if user_id:
user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id]
else:
user_transmissions = self.transmissions
if not user_transmissions:
return {
"total": 0,
"successful": 0,
"failed": 0,
"success_rate": 0.0
}
successful = sum(1 for t in user_transmissions if t["success"])
total = len(user_transmissions)
return {
"total": total,
"successful": successful,
"failed": total - successful,
"success_rate": successful / total if total > 0 else 0.0
}
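Putting the controller together with a persona, as a sketch; delivery itself is out of scope here, so the attempt is only recorded to transmissions.json:

from pathlib import Path
from aigpt.persona import Persona
from aigpt.transmission import TransmissionController

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
persona = Persona(data_dir)
controller = TransmissionController(persona, data_dir)

for user_id, rel in controller.check_transmission_eligibility().items():
    message = controller.generate_transmission_message(user_id)
    if message:
        controller.record_transmission(user_id, message, success=True)

print(controller.get_transmission_stats())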

@ -1,74 +0,0 @@
use clap::Subcommand;
use std::path::PathBuf;
#[derive(Subcommand)]
pub enum TokenCommands {
/// Show Claude Code token usage summary and estimated costs
Summary {
/// Time period (today, week, month, all)
#[arg(long, default_value = "week")]
period: Option<String>,
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
/// Show detailed breakdown
#[arg(long)]
details: bool,
/// Output format (table, json)
#[arg(long, default_value = "table")]
format: Option<String>,
/// Cost calculation mode (auto, calculate, display)
#[arg(long, default_value = "auto")]
mode: Option<String>,
},
/// Show daily token usage breakdown
Daily {
/// Number of days to show
#[arg(long, default_value = "7")]
days: Option<u32>,
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
},
/// Check Claude Code data availability and basic stats
Status {
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
},
/// Analyze specific JSONL file (advanced)
Analyze {
/// Path to JSONL file to analyze
file: PathBuf,
},
/// Generate beautiful token usage report using DuckDB (like the viral Claude Code usage visualization)
Report {
/// Number of days to include in report
#[arg(long, default_value = "7")]
days: Option<u32>,
},
/// Show detailed cost breakdown by session (requires DuckDB)
Cost {
/// Month to analyze (YYYY-MM, 'today', 'current')
#[arg(long)]
month: Option<String>,
},
/// Show token usage breakdown by project
Projects {
/// Time period (today, week, month, all)
#[arg(long, default_value = "week")]
period: Option<String>,
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
/// Cost calculation mode (auto, calculate, display)
#[arg(long, default_value = "calculate")]
mode: Option<String>,
/// Show detailed breakdown
#[arg(long)]
details: bool,
/// Number of top projects to show
#[arg(long, default_value = "10")]
top: Option<u32>,
},
}

@ -1,134 +0,0 @@
use std::path::PathBuf;
use anyhow::Result;
use crate::config::Config;
use crate::mcp_server::MCPServer;
use crate::persona::Persona;
use crate::transmission::TransmissionController;
use crate::scheduler::AIScheduler;
// Re-export from commands module
pub use commands::TokenCommands;
mod commands;
pub async fn handle_server(port: Option<u16>, data_dir: Option<PathBuf>) -> Result<()> {
let port = port.unwrap_or(8080);
let config = Config::new(data_dir.clone())?;
let mut server = MCPServer::new(config, "mcp_user".to_string(), data_dir)?;
server.start_server(port).await
}
pub async fn handle_chat(
user_id: String,
message: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let (response, relationship_delta) = if provider.is_some() || model.is_some() {
persona.process_ai_interaction(&user_id, &message, provider, model).await?
} else {
persona.process_interaction(&user_id, &message)?
};
println!("AI Response: {}", response);
println!("Relationship Change: {:+.2}", relationship_delta);
if let Some(relationship) = persona.get_relationship(&user_id) {
println!("Relationship Status: {} (Score: {:.2})",
relationship.status, relationship.score);
}
Ok(())
}
pub async fn handle_fortune(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let persona = Persona::new(&config)?;
let state = persona.get_current_state()?;
println!("🔮 Today's Fortune: {}", state.fortune_value);
println!("😊 Current Mood: {}", state.current_mood);
println!("✨ Breakthrough Status: {}",
if state.breakthrough_triggered { "Active" } else { "Inactive" });
Ok(())
}
pub async fn handle_relationships(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let persona = Persona::new(&config)?;
let relationships = persona.list_all_relationships();
if relationships.is_empty() {
println!("No relationships found.");
return Ok(());
}
println!("📊 Relationships ({}):", relationships.len());
for (user_id, rel) in relationships {
println!(" {} - {} (Score: {:.2}, Interactions: {})",
user_id, rel.status, rel.score, rel.total_interactions);
}
Ok(())
}
pub async fn handle_transmit(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config)?;
let autonomous = transmission_controller.check_autonomous_transmissions(&mut persona).await?;
let breakthrough = transmission_controller.check_breakthrough_transmissions(&mut persona).await?;
let maintenance = transmission_controller.check_maintenance_transmissions(&mut persona).await?;
let total = autonomous.len() + breakthrough.len() + maintenance.len();
println!("📡 Transmission Check Complete:");
println!(" Autonomous: {}", autonomous.len());
println!(" Breakthrough: {}", breakthrough.len());
println!(" Maintenance: {}", maintenance.len());
println!(" Total: {}", total);
Ok(())
}
pub async fn handle_maintenance(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config)?;
persona.daily_maintenance()?;
let maintenance_transmissions = transmission_controller.check_maintenance_transmissions(&mut persona).await?;
let stats = persona.get_relationship_stats();
println!("🔧 Daily maintenance completed");
println!("📤 Maintenance transmissions sent: {}", maintenance_transmissions.len());
println!("📊 Relationship stats: {:?}", stats);
Ok(())
}
pub async fn handle_schedule(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config.clone())?;
let mut scheduler = AIScheduler::new(&config)?;
let executions = scheduler.run_scheduled_tasks(&mut persona, &mut transmission_controller).await?;
let stats = scheduler.get_scheduler_stats();
println!("⏰ Scheduler run completed");
println!("📋 Tasks executed: {}", executions.len());
println!("📊 Stats: {} total tasks, {} enabled, {:.2}% success rate",
stats.total_tasks, stats.enabled_tasks, stats.success_rate);
Ok(())
}

@ -1,251 +0,0 @@
use std::path::PathBuf;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use crate::ai_provider::{AIConfig, AIProvider};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
#[serde(skip)]
pub data_dir: PathBuf,
pub default_provider: String,
pub providers: HashMap<String, ProviderConfig>,
#[serde(default)]
pub atproto: Option<AtprotoConfig>,
#[serde(default)]
pub mcp: Option<McpConfig>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderConfig {
pub default_model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub api_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub system_prompt: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AtprotoConfig {
pub handle: Option<String>,
pub password: Option<String>,
pub host: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpConfig {
#[serde(deserialize_with = "string_to_bool")]
pub enabled: bool,
#[serde(deserialize_with = "string_to_bool")]
pub auto_detect: bool,
pub servers: HashMap<String, McpServerConfig>,
}
fn string_to_bool<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::Deserialize;
let s = String::deserialize(deserializer)?;
match s.as_str() {
"true" => Ok(true),
"false" => Ok(false),
_ => Err(serde::de::Error::custom("expected 'true' or 'false'")),
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerConfig {
pub base_url: String,
pub name: String,
#[serde(deserialize_with = "string_to_f64")]
pub timeout: f64,
pub endpoints: HashMap<String, String>,
}
fn string_to_f64<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::Deserialize;
let s = String::deserialize(deserializer)?;
s.parse::<f64>().map_err(serde::de::Error::custom)
}
impl Config {
pub fn new(data_dir: Option<PathBuf>) -> Result<Self> {
let data_dir = data_dir.unwrap_or_else(|| {
dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join(".config")
.join("syui")
.join("ai")
.join("gpt")
});
// Ensure data directory exists
std::fs::create_dir_all(&data_dir)
.context("Failed to create data directory")?;
let config_path = data_dir.join("config.json");
// Try to load existing config
if config_path.exists() {
let config_str = std::fs::read_to_string(&config_path)
.context("Failed to read config.json")?;
// Check if file is empty
if config_str.trim().is_empty() {
eprintln!("Config file is empty, will recreate from source");
} else {
match serde_json::from_str::<Config>(&config_str) {
Ok(mut config) => {
config.data_dir = data_dir;
// Check for environment variables if API keys are empty
if let Some(openai_config) = config.providers.get_mut("openai") {
if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
}
}
return Ok(config);
}
Err(e) => {
eprintln!("Failed to parse existing config.json: {}", e);
eprintln!("Will try to reload from source...");
}
}
}
}
// Check if we need to migrate from JSON
// Try multiple locations for the JSON file
let possible_json_paths = vec![
PathBuf::from("../config.json"), // Relative to aigpt-rs directory
PathBuf::from("config.json"), // Current directory
PathBuf::from("gpt/config.json"), // From project root
PathBuf::from("/Users/syui/ai/ai/gpt/config.json"), // Absolute path
];
for json_path in possible_json_paths {
if json_path.exists() {
eprintln!("Found config.json at: {}", json_path.display());
eprintln!("Copying configuration...");
// Copy configuration file and parse it
std::fs::copy(&json_path, &config_path)
.context("Failed to copy config.json")?;
let config_str = std::fs::read_to_string(&config_path)
.context("Failed to read copied config.json")?;
println!("Config JSON content preview: {}", &config_str[..std::cmp::min(200, config_str.len())]);
let mut config: Config = serde_json::from_str(&config_str)
.context("Failed to parse config.json")?;
config.data_dir = data_dir;
// Check for environment variables if API keys are empty
if let Some(openai_config) = config.providers.get_mut("openai") {
if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
}
}
eprintln!("Copy complete! Config saved to: {}", config_path.display());
return Ok(config);
}
}
// Create default config
let config = Self::default_config(data_dir);
// Save default config
let json_str = serde_json::to_string_pretty(&config)
.context("Failed to serialize default config")?;
std::fs::write(&config_path, json_str)
.context("Failed to write default config.json")?;
Ok(config)
}
pub fn save(&self) -> Result<()> {
let config_path = self.data_dir.join("config.json");
let json_str = serde_json::to_string_pretty(self)
.context("Failed to serialize config")?;
std::fs::write(&config_path, json_str)
.context("Failed to write config.json")?;
Ok(())
}
fn default_config(data_dir: PathBuf) -> Self {
let mut providers = HashMap::new();
providers.insert("ollama".to_string(), ProviderConfig {
default_model: "qwen2.5".to_string(),
host: Some("http://localhost:11434".to_string()),
api_key: None,
system_prompt: None,
});
providers.insert("openai".to_string(), ProviderConfig {
default_model: "gpt-4o-mini".to_string(),
host: None,
api_key: std::env::var("OPENAI_API_KEY").ok(),
system_prompt: None,
});
Config {
data_dir,
default_provider: "ollama".to_string(),
providers,
atproto: None,
mcp: None,
}
}
pub fn get_provider(&self, provider_name: &str) -> Option<&ProviderConfig> {
self.providers.get(provider_name)
}
pub fn get_ai_config(&self, provider: Option<String>, model: Option<String>) -> Result<AIConfig> {
let provider_name = provider.as_deref().unwrap_or(&self.default_provider);
let provider_config = self.get_provider(provider_name)
.ok_or_else(|| anyhow::anyhow!("Unknown provider: {}", provider_name))?;
let ai_provider: AIProvider = provider_name.parse()?;
let model_name = model.unwrap_or_else(|| provider_config.default_model.clone());
Ok(AIConfig {
provider: ai_provider,
model: model_name,
api_key: provider_config.api_key.clone(),
base_url: provider_config.host.clone(),
max_tokens: Some(2048),
temperature: Some(0.7),
})
}
pub fn memory_file(&self) -> PathBuf {
self.data_dir.join("memories.json")
}
pub fn relationships_file(&self) -> PathBuf {
self.data_dir.join("relationships.json")
}
pub fn fortune_file(&self) -> PathBuf {
self.data_dir.join("fortune.json")
}
pub fn transmission_file(&self) -> PathBuf {
self.data_dir.join("transmissions.json")
}
pub fn scheduler_tasks_file(&self) -> PathBuf {
self.data_dir.join("scheduler_tasks.json")
}
pub fn scheduler_history_file(&self) -> PathBuf {
self.data_dir.join("scheduler_history.json")
}
}
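For reference, the string_to_bool and string_to_f64 deserializers above mean config.json stores those fields as strings rather than JSON booleans and numbers. A sketch of a config the parser would accept, generated from Python (server names and URLs are placeholders):

import json

config = {
    "default_provider": "ollama",
    "providers": {
        "ollama": {"default_model": "qwen2.5", "host": "http://localhost:11434"},
        "openai": {"default_model": "gpt-4o-mini"},
    },
    "mcp": {
        "enabled": "true",          # string, per string_to_bool
        "auto_detect": "true",
        "servers": {
            "ai_gpt": {
                "base_url": "http://localhost:8001",
                "name": "ai.gpt",
                "timeout": "10.0",  # string, per string_to_f64
                "endpoints": {"get_memories": "/get_memories"},
            }
        },
    },
}
print(json.dumps(config, indent=2))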

@ -1,205 +0,0 @@
use std::path::PathBuf;
use std::io::{self, Write};
use anyhow::Result;
use colored::*;
use crate::config::Config;
use crate::persona::Persona;
use crate::http_client::ServiceDetector;
pub async fn handle_conversation(
user_id: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
println!("{}", "Starting conversation mode...".cyan());
println!("{}", "Type your message and press Enter to chat.".yellow());
println!("{}", "Available MCP commands: /memories, /search, /context, /relationship, /cards".yellow());
println!("{}", "Type 'exit', 'quit', or 'bye' to end conversation.".yellow());
println!("{}", "---".dimmed());
let mut conversation_history = Vec::new();
let service_detector = ServiceDetector::new();
loop {
// Print prompt
print!("{} ", "You:".cyan().bold());
io::stdout().flush()?;
// Read user input
let mut input = String::new();
io::stdin().read_line(&mut input)?;
let input = input.trim();
// Check for exit commands
if matches!(input.to_lowercase().as_str(), "exit" | "quit" | "bye" | "") {
println!("{}", "Goodbye! 👋".green());
break;
}
// Handle MCP commands
if input.starts_with('/') {
handle_mcp_command(input, &user_id, &service_detector).await?;
continue;
}
// Add to conversation history
conversation_history.push(format!("User: {}", input));
// Get AI response
let (response, relationship_delta) = if provider.is_some() || model.is_some() {
persona.process_ai_interaction(&user_id, input, provider.clone(), model.clone()).await?
} else {
persona.process_interaction(&user_id, input)?
};
// Add AI response to history
conversation_history.push(format!("AI: {}", response));
// Display response
println!("{} {}", "AI:".green().bold(), response);
// Show relationship change if significant
if relationship_delta.abs() >= 0.1 {
if relationship_delta > 0.0 {
println!("{}", format!(" └─ (+{:.2} relationship)", relationship_delta).green().dimmed());
} else {
println!("{}", format!(" └─ ({:.2} relationship)", relationship_delta).red().dimmed());
}
}
println!(); // Add some spacing
// Keep conversation history manageable (last 20 exchanges)
if conversation_history.len() > 40 {
conversation_history.drain(0..20);
}
}
Ok(())
}
async fn handle_mcp_command(
command: &str,
user_id: &str,
service_detector: &ServiceDetector,
) -> Result<()> {
let parts: Vec<&str> = command[1..].split_whitespace().collect();
if parts.is_empty() {
return Ok(());
}
match parts[0] {
"memories" => {
println!("{}", "Retrieving memories...".yellow());
// Get contextual memories
if let Ok(memories) = service_detector.get_contextual_memories(user_id, 10).await {
if memories.is_empty() {
println!("No memories found for this conversation.");
} else {
println!("{}", format!("Found {} memories:", memories.len()).cyan());
for (i, memory) in memories.iter().enumerate() {
println!(" {}. {}", i + 1, memory.content);
println!(" {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
}
}
} else {
println!("{}", "Failed to retrieve memories.".red());
}
},
"search" => {
if parts.len() < 2 {
println!("{}", "Usage: /search <query>".yellow());
return Ok(());
}
let query = parts[1..].join(" ");
println!("{}", format!("Searching for: '{}'", query).yellow());
if let Ok(results) = service_detector.search_memories(&query, 5).await {
if results.is_empty() {
println!("No relevant memories found.");
} else {
println!("{}", format!("Found {} relevant memories:", results.len()).cyan());
for (i, memory) in results.iter().enumerate() {
println!(" {}. {}", i + 1, memory.content);
println!(" {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
}
}
} else {
println!("{}", "Search failed.".red());
}
},
"context" => {
println!("{}", "Creating context summary...".yellow());
if let Ok(summary) = service_detector.create_summary(user_id).await {
println!("{}", "Context Summary:".cyan().bold());
println!("{}", summary);
} else {
println!("{}", "Failed to create context summary.".red());
}
},
"relationship" => {
println!("{}", "Checking relationship status...".yellow());
// This would need to be implemented in the service client
println!("{}", "Relationship status: Active".cyan());
println!("Score: 85.5 / 100");
println!("Transmission: ✓ Enabled");
},
"cards" => {
println!("{}", "Checking card collection...".yellow());
// Try to connect to ai.card service
if let Ok(stats) = service_detector.get_card_stats().await {
println!("{}", "Card Collection:".cyan().bold());
println!(" Total Cards: {}", stats.get("total").unwrap_or(&serde_json::Value::Number(0.into())));
println!(" Unique Cards: {}", stats.get("unique").unwrap_or(&serde_json::Value::Number(0.into())));
// Offer to draw a card
println!("\n{}", "Would you like to draw a card? (y/n)".yellow());
let mut response = String::new();
io::stdin().read_line(&mut response)?;
if response.trim().to_lowercase() == "y" {
println!("{}", "Drawing card...".cyan());
if let Ok(card) = service_detector.draw_card(user_id, false).await {
println!("{}", "🎴 Card drawn!".green().bold());
println!("Name: {}", card.get("name").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
println!("Rarity: {}", card.get("rarity").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
} else {
println!("{}", "Failed to draw card. ai.card service might not be running.".red());
}
}
} else {
println!("{}", "ai.card service not available.".red());
}
},
"help" | "h" => {
println!("{}", "Available MCP Commands:".cyan().bold());
println!(" {:<15} - Show recent memories for this conversation", "/memories".yellow());
println!(" {:<15} - Search memories by keyword", "/search <query>".yellow());
println!(" {:<15} - Create a context summary", "/context".yellow());
println!(" {:<15} - Show relationship status", "/relationship".yellow());
println!(" {:<15} - Show card collection and draw cards", "/cards".yellow());
println!(" {:<15} - Show this help message", "/help".yellow());
},
_ => {
println!("{}", format!("Unknown command: /{}. Type '/help' for available commands.", parts[0]).red());
}
}
println!(); // Add spacing after MCP command output
Ok(())
}

@ -1,789 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};
use chrono::Utc;
use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIConfig, AIProvider};
pub async fn handle_docs(
action: String,
project: Option<String>,
output: Option<PathBuf>,
ai_integration: bool,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut docs_manager = DocsManager::new(config);
match action.as_str() {
"generate" => {
if let Some(project_name) = project {
docs_manager.generate_project_docs(&project_name, output, ai_integration).await?;
} else {
return Err(anyhow::anyhow!("Project name is required for generate action"));
}
}
"sync" => {
if let Some(project_name) = project {
docs_manager.sync_project_docs(&project_name).await?;
} else {
docs_manager.sync_all_docs().await?;
}
}
"list" => {
docs_manager.list_projects().await?;
}
"status" => {
docs_manager.show_docs_status().await?;
}
"session-end" => {
docs_manager.session_end_processing().await?;
}
_ => {
return Err(anyhow::anyhow!("Unknown docs action: {}", action));
}
}
Ok(())
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectInfo {
pub name: String,
pub project_type: String,
pub description: String,
pub status: String,
pub features: Vec<String>,
pub dependencies: Vec<String>,
}
impl Default for ProjectInfo {
fn default() -> Self {
ProjectInfo {
name: String::new(),
project_type: String::new(),
description: String::new(),
status: "active".to_string(),
features: Vec::new(),
dependencies: Vec::new(),
}
}
}
pub struct DocsManager {
config: Config,
ai_root: PathBuf,
projects: HashMap<String, ProjectInfo>,
}
impl DocsManager {
pub fn new(config: Config) -> Self {
let ai_root = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("ai")
.join("ai");
DocsManager {
config,
ai_root,
projects: HashMap::new(),
}
}
pub async fn generate_project_docs(&mut self, project: &str, output: Option<PathBuf>, ai_integration: bool) -> Result<()> {
println!("{}", format!("📝 Generating documentation for project '{}'", project).cyan().bold());
// Load project information
let project_info = self.load_project_info(project)?;
// Generate documentation content
let mut content = self.generate_base_documentation(&project_info)?;
// AI enhancement if requested
if ai_integration {
println!("{}", "🤖 Enhancing documentation with AI...".blue());
if let Ok(enhanced_content) = self.enhance_with_ai(project, &content).await {
content = enhanced_content;
} else {
println!("{}", "Warning: AI enhancement failed, using base documentation".yellow());
}
}
// Determine output path
let output_path = if let Some(path) = output {
path
} else {
self.ai_root.join(project).join("claude.md")
};
// Ensure directory exists
if let Some(parent) = output_path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("Failed to create directory: {}", parent.display()))?;
}
// Write documentation
std::fs::write(&output_path, content)
.with_context(|| format!("Failed to write documentation to: {}", output_path.display()))?;
println!("{}", format!("✅ Documentation generated: {}", output_path.display()).green().bold());
Ok(())
}
pub async fn sync_project_docs(&self, project: &str) -> Result<()> {
println!("{}", format!("🔄 Syncing documentation for project '{}'", project).cyan().bold());
let claude_dir = self.ai_root.join("claude");
let project_dir = self.ai_root.join(project);
// Check if claude directory exists
if !claude_dir.exists() {
return Err(anyhow::anyhow!("Claude directory not found: {}", claude_dir.display()));
}
// Copy relevant files
let files_to_sync = vec!["README.md", "claude.md", "DEVELOPMENT.md"];
for file in files_to_sync {
let src = claude_dir.join("projects").join(format!("{}.md", project));
let dst = project_dir.join(file);
if src.exists() {
if let Some(parent) = dst.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(&src, &dst)?;
println!(" ✓ Synced: {}", file.green());
}
}
println!("{}", "✅ Documentation sync completed".green().bold());
Ok(())
}
pub async fn sync_all_docs(&self) -> Result<()> {
println!("{}", "🔄 Syncing documentation for all projects...".cyan().bold());
// Find all project directories
let projects = self.discover_projects()?;
for project in projects {
println!("\n{}", format!("Syncing: {}", project).blue());
if let Err(e) = self.sync_project_docs(&project).await {
println!("{}: {}", "Warning".yellow(), e);
}
}
// Generate ai.wiki content after all project syncs
println!("\n{}", "📝 Updating ai.wiki...".blue());
if let Err(e) = self.update_ai_wiki().await {
println!("{}: Failed to update ai.wiki: {}", "Warning".yellow(), e);
}
// Update repository wiki (Gitea wiki) as well
println!("\n{}", "📝 Updating repository wiki...".blue());
if let Err(e) = self.update_repository_wiki().await {
println!("{}: Failed to update repository wiki: {}", "Warning".yellow(), e);
}
println!("\n{}", "✅ All projects synced".green().bold());
Ok(())
}
pub async fn list_projects(&mut self) -> Result<()> {
println!("{}", "📋 Available Projects".cyan().bold());
println!();
let projects = self.discover_projects()?;
if projects.is_empty() {
println!("{}", "No projects found".yellow());
return Ok(());
}
// Load project information
for project in &projects {
if let Ok(info) = self.load_project_info(project) {
self.projects.insert(project.clone(), info);
}
}
// Display projects in a table format
println!("{:<20} {:<15} {:<15} {}",
"Project".cyan().bold(),
"Type".cyan().bold(),
"Status".cyan().bold(),
"Description".cyan().bold());
println!("{}", "-".repeat(80));
let project_count = projects.len();
for project in &projects {
let info = self.projects.get(project).cloned().unwrap_or_default();
let status_color = match info.status.as_str() {
"active" => info.status.green(),
"development" => info.status.yellow(),
"deprecated" => info.status.red(),
_ => info.status.normal(),
};
println!("{:<20} {:<15} {:<15} {}",
project.blue(),
info.project_type,
status_color,
info.description);
}
println!();
println!("Total projects: {}", project_count.to_string().cyan());
Ok(())
}
pub async fn show_docs_status(&self) -> Result<()> {
println!("{}", "📊 Documentation Status".cyan().bold());
println!();
let projects = self.discover_projects()?;
let mut total_files = 0;
let mut total_lines = 0;
for project in projects {
let project_dir = self.ai_root.join(&project);
let claude_md = project_dir.join("claude.md");
if claude_md.exists() {
let content = std::fs::read_to_string(&claude_md)?;
let lines = content.lines().count();
let size = content.len();
println!("{}: {} lines, {} bytes",
project.blue(),
lines.to_string().yellow(),
size.to_string().yellow());
total_files += 1;
total_lines += lines;
} else {
println!("{}: {}", project.blue(), "No documentation".red());
}
}
println!();
println!("Summary: {} files, {} total lines",
total_files.to_string().cyan(),
total_lines.to_string().cyan());
Ok(())
}
fn discover_projects(&self) -> Result<Vec<String>> {
let mut projects = Vec::new();
// Known project directories
let known_projects = vec![
"gpt", "card", "bot", "shell", "os", "game", "moji", "verse"
];
for project in known_projects {
let project_dir = self.ai_root.join(project);
if project_dir.exists() && project_dir.is_dir() {
projects.push(project.to_string());
}
}
// Also scan for additional directories with ai.json
if self.ai_root.exists() {
for entry in std::fs::read_dir(&self.ai_root)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
let ai_json = path.join("ai.json");
if ai_json.exists() {
if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
if !projects.contains(&name.to_string()) {
projects.push(name.to_string());
}
}
}
}
}
}
projects.sort();
Ok(projects)
}
fn load_project_info(&self, project: &str) -> Result<ProjectInfo> {
let ai_json_path = self.ai_root.join(project).join("ai.json");
if ai_json_path.exists() {
let content = std::fs::read_to_string(&ai_json_path)?;
if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
let mut info = ProjectInfo::default();
info.name = project.to_string();
if let Some(project_data) = json_data.get(project) {
if let Some(type_str) = project_data.get("type").and_then(|v| v.as_str()) {
info.project_type = type_str.to_string();
}
if let Some(desc) = project_data.get("description").and_then(|v| v.as_str()) {
info.description = desc.to_string();
}
}
return Ok(info);
}
}
// Default project info based on known projects
let mut info = ProjectInfo::default();
info.name = project.to_string();
match project {
"gpt" => {
info.project_type = "AI".to_string();
info.description = "Autonomous transmission AI with unique personality".to_string();
}
"card" => {
info.project_type = "Game".to_string();
info.description = "Card game system with atproto integration".to_string();
}
"bot" => {
info.project_type = "Bot".to_string();
info.description = "Distributed SNS bot for AI ecosystem".to_string();
}
"shell" => {
info.project_type = "Tool".to_string();
info.description = "AI-powered shell interface".to_string();
}
"os" => {
info.project_type = "OS".to_string();
info.description = "Game-oriented operating system".to_string();
}
"verse" => {
info.project_type = "Metaverse".to_string();
info.description = "Reality-reflecting 3D world system".to_string();
}
_ => {
info.project_type = "Unknown".to_string();
info.description = format!("AI ecosystem project: {}", project);
}
}
Ok(info)
}
fn generate_base_documentation(&self, project_info: &ProjectInfo) -> Result<String> {
let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S UTC");
let mut content = String::new();
content.push_str(&format!("# {}\n\n", project_info.name));
content.push_str(&format!("## Overview\n\n"));
content.push_str(&format!("**Type**: {}\n\n", project_info.project_type));
content.push_str(&format!("**Description**: {}\n\n", project_info.description));
content.push_str(&format!("**Status**: {}\n\n", project_info.status));
if !project_info.features.is_empty() {
content.push_str("## Features\n\n");
for feature in &project_info.features {
content.push_str(&format!("- {}\n", feature));
}
content.push_str("\n");
}
content.push_str("## Architecture\n\n");
content.push_str("This project is part of the ai ecosystem, following the core principles:\n\n");
content.push_str("- **Existence Theory**: Based on the exploration of the smallest units (ai/existon)\n");
content.push_str("- **Uniqueness Principle**: Ensuring 1:1 mapping between reality and digital existence\n");
content.push_str("- **Reality Reflection**: Creating circular influence between reality and game\n\n");
content.push_str("## Development\n\n");
content.push_str("### Getting Started\n\n");
content.push_str("```bash\n");
content.push_str(&format!("# Clone the repository\n"));
content.push_str(&format!("git clone https://git.syui.ai/ai/{}\n", project_info.name));
content.push_str(&format!("cd {}\n", project_info.name));
content.push_str("```\n\n");
content.push_str("### Configuration\n\n");
content.push_str(&format!("Configuration files are stored in `~/.config/syui/ai/{}/`\n\n", project_info.name));
content.push_str("## Integration\n\n");
content.push_str("This project integrates with other ai ecosystem components:\n\n");
if !project_info.dependencies.is_empty() {
for dep in &project_info.dependencies {
content.push_str(&format!("- **{}**: Core dependency\n", dep));
}
} else {
content.push_str("- **ai.gpt**: Core AI personality system\n");
content.push_str("- **atproto**: Distributed identity and data\n");
}
content.push_str("\n");
content.push_str("---\n\n");
content.push_str(&format!("*Generated: {}*\n", timestamp));
content.push_str("*🤖 Generated with [Claude Code](https://claude.ai/code)*\n");
Ok(content)
}
async fn enhance_with_ai(&self, project: &str, base_content: &str) -> Result<String> {
// Create AI provider
let ai_config = AIConfig {
provider: AIProvider::Ollama,
model: "llama2".to_string(),
api_key: None,
base_url: None,
max_tokens: Some(2000),
temperature: Some(0.7),
};
let _ai_provider = AIProviderClient::new(ai_config);
let mut persona = Persona::new(&self.config)?;
let enhancement_prompt = format!(
"As an AI documentation expert, enhance the following documentation for project '{}'.
Current documentation:
{}
Please provide enhanced content that includes:
1. More detailed project description
2. Key features and capabilities
3. Usage examples
4. Integration points with other AI ecosystem projects
5. Development workflow recommendations
Keep the same structure but expand and improve the content.",
project, base_content
);
// Try to get AI response
let (response, _) = persona.process_ai_interaction(
"docs_system",
&enhancement_prompt,
Some("ollama".to_string()),
Some("llama2".to_string())
).await?;
// If AI response is substantial, use it; otherwise fall back to base content
if response.len() > base_content.len() / 2 {
Ok(response)
} else {
Ok(base_content.to_string())
}
}
/// Session-end processing (documentation recording and sync)
pub async fn session_end_processing(&mut self) -> Result<()> {
println!("{}", "🔄 Session end processing started...".cyan());
// 1. Record the current project status
println!("📊 Recording current project status...");
self.record_session_summary().await?;
// 2. Sync documentation for all projects
println!("🔄 Syncing all project documentation...");
self.sync_all_docs().await?;
// 3. Auto-update README files
println!("📝 Updating project README files...");
self.update_project_readmes().await?;
// 4. Update project metadata
println!("🏷️ Updating project metadata...");
self.update_project_metadata().await?;
println!("{}", "✅ Session end processing completed!".green());
Ok(())
}
/// Record a session summary
async fn record_session_summary(&self) -> Result<()> {
let session_log_path = self.ai_root.join("session_logs");
std::fs::create_dir_all(&session_log_path)?;
let timestamp = Utc::now().format("%Y-%m-%d_%H-%M-%S");
let log_file = session_log_path.join(format!("session_{}.md", timestamp));
let summary = format!(
"# Session Summary - {}\n\n\
## Timestamp\n{}\n\n\
## Projects Status\n{}\n\n\
## Next Actions\n- Documentation sync completed\n- README files updated\n- Metadata refreshed\n\n\
---\n*Generated by aigpt session-end processing*\n",
timestamp,
Utc::now().format("%Y-%m-%d %H:%M:%S UTC"),
self.generate_projects_status().await.unwrap_or_else(|_| "Status unavailable".to_string())
);
std::fs::write(log_file, summary)?;
Ok(())
}
/// Generate a per-project status overview
async fn generate_projects_status(&self) -> Result<String> {
let projects = self.discover_projects()?;
let mut status = String::new();
for project in projects {
let claude_md = self.ai_root.join(&project).join("claude.md");
let readme_md = self.ai_root.join(&project).join("README.md");
status.push_str(&format!("- **{}**: ", project));
if claude_md.exists() {
status.push_str("claude.md ✅ ");
} else {
status.push_str("claude.md ❌ ");
}
if readme_md.exists() {
status.push_str("README.md ✅");
} else {
status.push_str("README.md ❌");
}
status.push('\n');
}
Ok(status)
}
/// Update ai.wiki
async fn update_ai_wiki(&self) -> Result<()> {
let ai_wiki_path = self.ai_root.join("ai.wiki");
// Make sure the ai.wiki directory exists
if !ai_wiki_path.exists() {
return Err(anyhow::anyhow!("ai.wiki directory not found at {:?}", ai_wiki_path));
}
// Generate Home.md
let home_content = self.generate_wiki_home_content().await?;
let home_path = ai_wiki_path.join("Home.md");
std::fs::write(&home_path, &home_content)?;
println!(" ✓ Updated: {}", "Home.md".green());
// Generate title.md (for the Gitea wiki special page)
let title_path = ai_wiki_path.join("title.md");
std::fs::write(&title_path, &home_content)?;
println!(" ✓ Updated: {}", "title.md".green());
// Update the per-project directories
let projects = self.discover_projects()?;
for project in projects {
let project_dir = ai_wiki_path.join(&project);
std::fs::create_dir_all(&project_dir)?;
let project_content = self.generate_auto_project_content(&project).await?;
let project_file = project_dir.join(format!("{}.md", project));
std::fs::write(&project_file, project_content)?;
println!(" ✓ Updated: {}", format!("{}/{}.md", project, project).green());
}
println!("{}", "✅ ai.wiki updated successfully".green().bold());
Ok(())
}
/// Generate the content for ai.wiki/Home.md
async fn generate_wiki_home_content(&self) -> Result<String> {
let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S");
let mut content = String::new();
content.push_str("# AI Ecosystem Wiki\n\n");
content.push_str("AI生態系プロジェクトの概要とドキュメント集約ページです。\n\n");
content.push_str("## プロジェクト一覧\n\n");
let projects = self.discover_projects()?;
let mut project_sections = std::collections::HashMap::new();
// Group projects by category
for project in &projects {
let info = self.load_project_info(project).unwrap_or_default();
let category = match project.as_str() {
"ai" => "🧠 AI・知能システム",
"gpt" => "🤖 自律・対話システム",
"os" => "💻 システム・基盤",
"game" => "📁 device",
"card" => "🎮 ゲーム・エンターテイメント",
"bot" | "moji" | "api" | "log" => "📁 その他",
"verse" => "📁 metaverse",
"shell" => "⚡ ツール・ユーティリティ",
_ => "📁 その他",
};
project_sections.entry(category).or_insert_with(Vec::new).push((project.clone(), info));
}
// Emit projects grouped by category
let mut categories: Vec<_> = project_sections.keys().collect();
categories.sort();
for category in categories {
content.push_str(&format!("### {}\n\n", category));
if let Some(projects_in_category) = project_sections.get(category) {
for (project, info) in projects_in_category {
content.push_str(&format!("#### [{}]({}.md)\n", project, project));
if !info.description.is_empty() {
content.push_str(&format!("- **名前**: ai.{} - **パッケージ**: ai{} - **タイプ**: {} - **役割**: {}\n\n",
project, project, info.project_type, info.description));
}
content.push_str(&format!("**Status**: {} \n", info.status));
let branch = self.get_project_branch(project);
content.push_str(&format!("**Links**: [Repo](https://git.syui.ai/ai/{}) | [Docs](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));
}
}
}
content.push_str("---\n\n");
content.push_str("## ディレクトリ構成\n\n");
content.push_str("- `{project}/` - プロジェクト個別ドキュメント\n");
content.push_str("- `claude/` - Claude Code作業記録\n");
content.push_str("- `manual/` - 手動作成ドキュメント\n\n");
content.push_str("---\n\n");
content.push_str("*このページは ai.json と claude/projects/ から自動生成されました* \n");
content.push_str(&format!("*最終更新: {}*\n", timestamp));
Ok(content)
}
/// Generate the content for a per-project wiki page
async fn generate_auto_project_content(&self, project: &str) -> Result<String> {
let info = self.load_project_info(project).unwrap_or_default();
let mut content = String::new();
content.push_str(&format!("# {}\n\n", project));
content.push_str("## 概要\n");
content.push_str(&format!("- **名前**: ai.{} - **パッケージ**: ai{} - **タイプ**: {} - **役割**: {}\n\n",
project, project, info.project_type, info.description));
content.push_str("## プロジェクト情報\n");
content.push_str(&format!("- **タイプ**: {}\n", info.project_type));
content.push_str(&format!("- **説明**: {}\n", info.description));
content.push_str(&format!("- **ステータス**: {}\n", info.status));
let branch = self.get_project_branch(project);
content.push_str(&format!("- **ブランチ**: {}\n", branch));
content.push_str("- **最終更新**: Unknown\n\n");
// Add project-specific feature information
if !info.features.is_empty() {
content.push_str("## 主な機能・特徴\n");
for feature in &info.features {
content.push_str(&format!("- {}\n", feature));
}
content.push_str("\n");
}
content.push_str("## リンク\n");
content.push_str(&format!("- **Repository**: https://git.syui.ai/ai/{}\n", project));
content.push_str(&format!("- **Project Documentation**: [claude/projects/{}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{}.md)\n", project, project));
let branch = self.get_project_branch(project);
content.push_str(&format!("- **Generated Documentation**: [{}/claude.md](https://git.syui.ai/ai/{}/src/branch/{}/claude.md)\n\n", project, project, branch));
content.push_str("---\n");
content.push_str(&format!("*このページは claude/projects/{}.md から自動生成されました*\n", project));
Ok(content)
}
/// Update the repository wiki (Gitea wiki)
async fn update_repository_wiki(&self) -> Result<()> {
println!(" Repository wiki is now unified with ai.wiki");
println!(" ai.wiki serves as the source of truth (git@git.syui.ai:ai/ai.wiki.git)");
println!(" Special pages generated: Home.md, title.md for Gitea wiki compatibility");
Ok(())
}
/// Update project README files
async fn update_project_readmes(&self) -> Result<()> {
let projects = self.discover_projects()?;
for project in projects {
let readme_path = self.ai_root.join(&project).join("README.md");
let claude_md_path = self.ai_root.join(&project).join("claude.md");
// If claude.md exists, sync it into the README
if claude_md_path.exists() {
let claude_content = std::fs::read_to_string(&claude_md_path)?;
// Create the README if it does not exist yet
if !readme_path.exists() {
println!("📝 Creating README.md for {}", project);
std::fs::write(&readme_path, &claude_content)?;
} else {
// Update when the existing README is older than claude.md
let readme_metadata = std::fs::metadata(&readme_path)?;
let claude_metadata = std::fs::metadata(&claude_md_path)?;
if claude_metadata.modified()? > readme_metadata.modified()? {
println!("🔄 Updating README.md for {}", project);
std::fs::write(&readme_path, &claude_content)?;
}
}
}
}
Ok(())
}
/// Update project metadata
async fn update_project_metadata(&self) -> Result<()> {
let projects = self.discover_projects()?;
for project in projects {
let ai_json_path = self.ai_root.join(&project).join("ai.json");
if ai_json_path.exists() {
let mut content = std::fs::read_to_string(&ai_json_path)?;
let mut json_data: serde_json::Value = serde_json::from_str(&content)?;
// Update the last_updated field
if let Some(project_data) = json_data.get_mut(&project) {
if let Some(obj) = project_data.as_object_mut() {
obj.insert("last_updated".to_string(),
serde_json::Value::String(Utc::now().to_rfc3339()));
obj.insert("status".to_string(),
serde_json::Value::String("active".to_string()));
content = serde_json::to_string_pretty(&json_data)?;
std::fs::write(&ai_json_path, content)?;
}
}
}
}
Ok(())
}
/// Get a project's branch from the main ai.json
fn get_project_branch(&self, project: &str) -> String {
let main_ai_json_path = self.ai_root.join("ai.json");
if main_ai_json_path.exists() {
if let Ok(content) = std::fs::read_to_string(&main_ai_json_path) {
if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
if let Some(ai_section) = json_data.get("ai") {
if let Some(project_data) = ai_section.get(project) {
if let Some(branch) = project_data.get("branch").and_then(|v| v.as_str()) {
return branch.to_string();
}
}
}
}
}
}
// Default to main
"main".to_string()
}
}
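A minimal usage sketch of the docs entry point above (assumptions: default data directory and a locally checked-out project named "gpt"):
// Sketch only: drive handle_docs the same way the CLI's `docs` subcommand does
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Generate claude.md for one project, then sync docs for every project
handle_docs("generate".to_string(), Some("gpt".to_string()), None, false, None).await?;
handle_docs("sync".to_string(), None, None, false, None).await?;
Ok(())
}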

View File

@ -1,409 +0,0 @@
use anyhow::{anyhow, Result};
use reqwest::Client;
use serde_json::Value;
use serde::{Serialize, Deserialize};
use std::time::Duration;
use std::collections::HashMap;
/// Service configuration for unified service management
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceConfig {
pub base_url: String,
pub timeout: Duration,
pub health_endpoint: String,
}
impl Default for ServiceConfig {
fn default() -> Self {
Self {
base_url: "http://localhost:8000".to_string(),
timeout: Duration::from_secs(30),
health_endpoint: "/health".to_string(),
}
}
}
/// HTTP client for inter-service communication
pub struct ServiceClient {
client: Client,
service_registry: HashMap<String, ServiceConfig>,
}
impl ServiceClient {
pub fn new() -> Self {
Self::with_default_services()
}
/// Create ServiceClient with default ai ecosystem services
pub fn with_default_services() -> Self {
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
let mut service_registry = HashMap::new();
// Register default ai ecosystem services
service_registry.insert("ai.card".to_string(), ServiceConfig {
base_url: "http://localhost:8000".to_string(),
timeout: Duration::from_secs(30),
health_endpoint: "/health".to_string(),
});
service_registry.insert("ai.log".to_string(), ServiceConfig {
base_url: "http://localhost:8002".to_string(),
timeout: Duration::from_secs(30),
health_endpoint: "/health".to_string(),
});
service_registry.insert("ai.bot".to_string(), ServiceConfig {
base_url: "http://localhost:8003".to_string(),
timeout: Duration::from_secs(30),
health_endpoint: "/health".to_string(),
});
Self { client, service_registry }
}
/// Create ServiceClient with custom service registry
pub fn with_services(service_registry: HashMap<String, ServiceConfig>) -> Self {
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client, service_registry }
}
/// Register a new service configuration
pub fn register_service(&mut self, name: String, config: ServiceConfig) {
self.service_registry.insert(name, config);
}
/// Get service configuration by name
pub fn get_service_config(&self, service: &str) -> Result<&ServiceConfig> {
self.service_registry.get(service)
.ok_or_else(|| anyhow!("Unknown service: {}", service))
}
/// Universal service method call
pub async fn call_service_method<T: Serialize>(
&self,
service: &str,
method: &str,
params: &T
) -> Result<Value> {
let config = self.get_service_config(service)?;
let url = format!("{}/{}", config.base_url.trim_end_matches('/'), method.trim_start_matches('/'));
self.post_request(&url, &serde_json::to_value(params)?).await
}
/// Universal service GET call
pub async fn call_service_get(&self, service: &str, endpoint: &str) -> Result<Value> {
let config = self.get_service_config(service)?;
let url = format!("{}/{}", config.base_url.trim_end_matches('/'), endpoint.trim_start_matches('/'));
self.get_request(&url).await
}
/// Check if a service is available
pub async fn check_service_status(&self, base_url: &str) -> Result<ServiceStatus> {
let url = format!("{}/health", base_url.trim_end_matches('/'));
match self.client.get(&url).send().await {
Ok(response) => {
if response.status().is_success() {
Ok(ServiceStatus::Available)
} else {
Ok(ServiceStatus::Error(format!("HTTP {}", response.status())))
}
}
Err(e) => Ok(ServiceStatus::Unavailable(e.to_string())),
}
}
/// Make a GET request to a service
pub async fn get_request(&self, url: &str) -> Result<Value> {
let response = self.client
.get(url)
.send()
.await?;
if !response.status().is_success() {
return Err(anyhow!("Request failed with status: {}", response.status()));
}
let json: Value = response.json().await?;
Ok(json)
}
/// Make a POST request to a service
pub async fn post_request(&self, url: &str, body: &Value) -> Result<Value> {
let response = self.client
.post(url)
.header("Content-Type", "application/json")
.json(body)
.send()
.await?;
if !response.status().is_success() {
return Err(anyhow!("Request failed with status: {}", response.status()));
}
let json: Value = response.json().await?;
Ok(json)
}
/// Get user's card collection from ai.card service
pub async fn get_user_cards(&self, user_did: &str) -> Result<Value> {
let endpoint = format!("api/v1/cards/user/{}", user_did);
self.call_service_get("ai.card", &endpoint).await
}
/// Draw a card for user from ai.card service
pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<Value> {
let params = serde_json::json!({
"user_did": user_did,
"is_paid": is_paid
});
self.call_service_method("ai.card", "api/v1/cards/draw", &params).await
}
/// Get card statistics from ai.card service
pub async fn get_card_stats(&self) -> Result<Value> {
self.call_service_get("ai.card", "api/v1/cards/gacha-stats").await
}
// MARK: - ai.log service methods
/// Create a new blog post
pub async fn create_blog_post<T: Serialize>(&self, params: &T) -> Result<Value> {
self.call_service_method("ai.log", "api/v1/posts", params).await
}
/// Get list of blog posts
pub async fn get_blog_posts(&self) -> Result<Value> {
self.call_service_get("ai.log", "api/v1/posts").await
}
/// Build the blog
pub async fn build_blog(&self) -> Result<Value> {
self.call_service_method("ai.log", "api/v1/build", &serde_json::json!({})).await
}
/// Translate document using ai.log service
pub async fn translate_document<T: Serialize>(&self, params: &T) -> Result<Value> {
self.call_service_method("ai.log", "api/v1/translate", params).await
}
/// Generate documentation using ai.log service
pub async fn generate_docs<T: Serialize>(&self, params: &T) -> Result<Value> {
self.call_service_method("ai.log", "api/v1/docs", params).await
}
}
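A short usage sketch (the service name and endpoint are hypothetical, not part of the registry above) showing how a caller registers and queries a service:
// Sketch: register a custom service, then issue a GET through the generic helper
async fn demo() -> anyhow::Result<()> {
let mut client = ServiceClient::new();
client.register_service("ai.example".to_string(), ServiceConfig {
base_url: "http://localhost:9000".to_string(),
timeout: Duration::from_secs(10),
health_endpoint: "/health".to_string(),
});
// Resolves to GET http://localhost:9000/api/v1/ping (endpoint is illustrative)
let value = client.call_service_get("ai.example", "api/v1/ping").await?;
println!("{}", value);
Ok(())
}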
/// Service status enum
#[derive(Debug, Clone)]
pub enum ServiceStatus {
Available,
Unavailable(String),
Error(String),
}
impl ServiceStatus {
pub fn is_available(&self) -> bool {
matches!(self, ServiceStatus::Available)
}
}
/// Service detector for ai ecosystem services
pub struct ServiceDetector {
client: ServiceClient,
}
impl ServiceDetector {
pub fn new() -> Self {
Self {
client: ServiceClient::new(),
}
}
/// Check all ai ecosystem services
pub async fn detect_services(&self) -> ServiceMap {
let mut services = ServiceMap::default();
// Check ai.card service
if let Ok(status) = self.client.check_service_status("http://localhost:8000").await {
services.ai_card = Some(ServiceInfo {
base_url: "http://localhost:8000".to_string(),
status,
});
}
// Check ai.log service (ports below aligned with the default service registry above)
if let Ok(status) = self.client.check_service_status("http://localhost:8002").await {
services.ai_log = Some(ServiceInfo {
base_url: "http://localhost:8002".to_string(),
status,
});
}
// Check ai.bot service
if let Ok(status) = self.client.check_service_status("http://localhost:8003").await {
services.ai_bot = Some(ServiceInfo {
base_url: "http://localhost:8003".to_string(),
status,
});
}
services
}
/// Get available services only
pub async fn get_available_services(&self) -> Vec<String> {
let services = self.detect_services().await;
let mut available = Vec::new();
if let Some(card) = &services.ai_card {
if card.status.is_available() {
available.push("ai.card".to_string());
}
}
if let Some(log) = &services.ai_log {
if log.status.is_available() {
available.push("ai.log".to_string());
}
}
if let Some(bot) = &services.ai_bot {
if bot.status.is_available() {
available.push("ai.bot".to_string());
}
}
available
}
/// Get card collection statistics
pub async fn get_card_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
match self.client.get_request("http://localhost:8000/api/v1/cards/gacha-stats").await {
Ok(stats) => Ok(stats),
Err(e) => Err(e.into()),
}
}
/// Draw a card for user
pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
let payload = serde_json::json!({
"user_did": user_did,
"is_paid": is_paid
});
match self.client.post_request("http://localhost:8000/api/v1/cards/draw", &payload).await {
Ok(card) => Ok(card),
Err(e) => Err(e.into()),
}
}
/// Get user's card collection
pub async fn get_user_cards(&self, user_did: &str) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
let url = format!("http://localhost:8000/api/v1/cards/collection?did={}", user_did);
match self.client.get_request(&url).await {
Ok(collection) => Ok(collection),
Err(e) => Err(e.into()),
}
}
/// Get contextual memories for conversation mode
pub async fn get_contextual_memories(&self, _user_id: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return an empty vec to make compilation work
Ok(Vec::new())
}
/// Search memories by query
pub async fn search_memories(&self, _query: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return an empty vec to make compilation work
Ok(Vec::new())
}
/// Create context summary
pub async fn create_summary(&self, user_id: &str) -> Result<String, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return a placeholder summary
Ok(format!("Context summary for user: {}", user_id))
}
}
/// Service information
#[derive(Debug, Clone)]
pub struct ServiceInfo {
pub base_url: String,
pub status: ServiceStatus,
}
/// Map of all ai ecosystem services
#[derive(Debug, Clone, Default)]
pub struct ServiceMap {
pub ai_card: Option<ServiceInfo>,
pub ai_log: Option<ServiceInfo>,
pub ai_bot: Option<ServiceInfo>,
}
impl ServiceMap {
/// Get service info by name
pub fn get_service(&self, name: &str) -> Option<&ServiceInfo> {
match name {
"ai.card" => self.ai_card.as_ref(),
"ai.log" => self.ai_log.as_ref(),
"ai.bot" => self.ai_bot.as_ref(),
_ => None,
}
}
/// Check if a service is available
pub fn is_service_available(&self, name: &str) -> bool {
self.get_service(name)
.map(|info| info.status.is_available())
.unwrap_or(false)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_service_client_creation() {
let _client = ServiceClient::new();
// Basic test to ensure client can be created
assert!(true);
}
#[test]
fn test_service_status() {
let status = ServiceStatus::Available;
assert!(status.is_available());
let status = ServiceStatus::Unavailable("Connection refused".to_string());
assert!(!status.is_available());
}
#[test]
fn test_service_map() {
let mut map = ServiceMap::default();
assert!(!map.is_service_available("ai.card"));
map.ai_card = Some(ServiceInfo {
base_url: "http://localhost:8000".to_string(),
status: ServiceStatus::Available,
});
assert!(map.is_service_available("ai.card"));
assert!(!map.is_service_available("ai.log"));
}
}

View File

@ -1,331 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use serde::Deserialize;
use anyhow::{Result, Context};
use colored::*;
use chrono::{DateTime, Utc};
use crate::config::Config;
use crate::persona::Persona;
use crate::memory::{Memory, MemoryType};
pub async fn handle_import_chatgpt(
file_path: PathBuf,
user_id: Option<String>,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let user_id = user_id.unwrap_or_else(|| "imported_user".to_string());
println!("{}", "🚀 Starting ChatGPT Import...".cyan().bold());
println!("File: {}", file_path.display().to_string().yellow());
println!("User ID: {}", user_id.yellow());
println!();
let mut importer = ChatGPTImporter::new(user_id);
let stats = importer.import_from_file(&file_path, &mut persona).await?;
// Display import statistics
println!("\n{}", "📊 Import Statistics".green().bold());
println!("Conversations imported: {}", stats.conversations_imported.to_string().cyan());
println!("Messages imported: {}", stats.messages_imported.to_string().cyan());
println!(" - User messages: {}", stats.user_messages.to_string().yellow());
println!(" - Assistant messages: {}", stats.assistant_messages.to_string().yellow());
if stats.skipped_messages > 0 {
println!(" - Skipped messages: {}", stats.skipped_messages.to_string().red());
}
// Show updated relationship
if let Some(relationship) = persona.get_relationship(&importer.user_id) {
println!("\n{}", "👥 Updated Relationship".blue().bold());
println!("Status: {}", relationship.status.to_string().yellow());
println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
println!("Transmission enabled: {}",
if relationship.transmission_enabled { "".green() } else { "".red() });
}
println!("\n{}", "✅ ChatGPT import completed successfully!".green().bold());
Ok(())
}
#[derive(Debug, Clone)]
pub struct ImportStats {
pub conversations_imported: usize,
pub messages_imported: usize,
pub user_messages: usize,
pub assistant_messages: usize,
pub skipped_messages: usize,
}
impl Default for ImportStats {
fn default() -> Self {
ImportStats {
conversations_imported: 0,
messages_imported: 0,
user_messages: 0,
assistant_messages: 0,
skipped_messages: 0,
}
}
}
pub struct ChatGPTImporter {
user_id: String,
stats: ImportStats,
}
impl ChatGPTImporter {
pub fn new(user_id: String) -> Self {
ChatGPTImporter {
user_id,
stats: ImportStats::default(),
}
}
pub async fn import_from_file(&mut self, file_path: &PathBuf, persona: &mut Persona) -> Result<ImportStats> {
// Read and parse the JSON file
let content = std::fs::read_to_string(file_path)
.with_context(|| format!("Failed to read file: {}", file_path.display()))?;
let conversations: Vec<ChatGPTConversation> = serde_json::from_str(&content)
.context("Failed to parse ChatGPT export JSON")?;
println!("Found {} conversations to import", conversations.len());
// Import each conversation
for (i, conversation) in conversations.iter().enumerate() {
if i % 10 == 0 && i > 0 {
println!("Processed {} / {} conversations...", i, conversations.len());
}
match self.import_single_conversation(conversation, persona).await {
Ok(_) => {
self.stats.conversations_imported += 1;
}
Err(e) => {
println!("{}: Failed to import conversation '{}': {}",
"Warning".yellow(),
conversation.title.as_deref().unwrap_or("Untitled"),
e);
}
}
}
Ok(self.stats.clone())
}
async fn import_single_conversation(&mut self, conversation: &ChatGPTConversation, persona: &mut Persona) -> Result<()> {
// Extract messages from the mapping structure
let messages = self.extract_messages_from_mapping(&conversation.mapping)?;
if messages.is_empty() {
return Ok(());
}
// Process each message
for message in messages {
match self.process_message(&message, persona).await {
Ok(_) => {
self.stats.messages_imported += 1;
}
Err(_) => {
self.stats.skipped_messages += 1;
}
}
}
Ok(())
}
fn extract_messages_from_mapping(&self, mapping: &HashMap<String, ChatGPTNode>) -> Result<Vec<ChatGPTMessage>> {
let mut messages = Vec::new();
// Find all message nodes and collect them
for node in mapping.values() {
if let Some(message) = &node.message {
// Skip system messages and other non-user/assistant messages
if let Some(role) = &message.author.role {
match role.as_str() {
"user" | "assistant" => {
if let Some(content) = &message.content {
let content_text = if content.content_type == "text" && !content.parts.is_empty() {
// Extract text from parts (handle both strings and mixed content)
content.parts.iter()
.filter_map(|part| part.as_str())
.collect::<Vec<&str>>()
.join("\n")
} else if content.content_type == "multimodal_text" {
// Extract text parts from multimodal content
let mut text_parts = Vec::new();
for part in &content.parts {
if let Some(text) = part.as_str() {
if !text.is_empty() {
text_parts.push(text);
}
}
// Skip non-text parts (like image_asset_pointer)
}
if text_parts.is_empty() {
continue; // Skip if no text content
}
text_parts.join("\n")
} else if content.content_type == "user_editable_context" {
// Handle user context messages
if let Some(instructions) = &content.user_instructions {
format!("User instructions: {}", instructions)
} else if let Some(profile) = &content.user_profile {
format!("User profile: {}", profile)
} else {
continue; // Skip empty context messages
}
} else {
continue; // Skip other content types for now
};
if !content_text.trim().is_empty() {
messages.push(ChatGPTMessage {
role: role.clone(),
content: content_text,
create_time: message.create_time,
});
}
}
}
_ => {} // Skip system, tool, etc.
}
}
}
}
// Sort messages by creation time
messages.sort_by(|a, b| {
let time_a = a.create_time.unwrap_or(0.0);
let time_b = b.create_time.unwrap_or(0.0);
time_a.partial_cmp(&time_b).unwrap_or(std::cmp::Ordering::Equal)
});
Ok(messages)
}
async fn process_message(&mut self, message: &ChatGPTMessage, persona: &mut Persona) -> Result<()> {
let timestamp = self.convert_timestamp(message.create_time.unwrap_or(0.0))?;
match message.role.as_str() {
"user" => {
self.add_user_message(&message.content, timestamp, persona)?;
self.stats.user_messages += 1;
}
"assistant" => {
self.add_assistant_message(&message.content, timestamp, persona)?;
self.stats.assistant_messages += 1;
}
_ => {
return Err(anyhow::anyhow!("Unsupported message role: {}", message.role));
}
}
Ok(())
}
fn add_user_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
// Create high-importance memory for user messages
let memory = Memory {
id: uuid::Uuid::new_v4().to_string(),
user_id: self.user_id.clone(),
content: content.to_string(),
summary: None,
importance: 0.8, // High importance for imported user data
memory_type: MemoryType::Core,
created_at: timestamp,
last_accessed: timestamp,
access_count: 1,
};
// Add memory and update relationship
persona.add_memory(memory)?;
persona.update_relationship(&self.user_id, 1.0)?; // Positive relationship boost
Ok(())
}
fn add_assistant_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
// Create medium-importance memory for assistant responses
let memory = Memory {
id: uuid::Uuid::new_v4().to_string(),
user_id: self.user_id.clone(),
content: format!("[AI Response] {}", content),
summary: Some("Imported ChatGPT response".to_string()),
importance: 0.6, // Medium importance for AI responses
memory_type: MemoryType::Summary,
created_at: timestamp,
last_accessed: timestamp,
access_count: 1,
};
persona.add_memory(memory)?;
Ok(())
}
fn convert_timestamp(&self, unix_timestamp: f64) -> Result<DateTime<Utc>> {
if unix_timestamp <= 0.0 {
return Ok(Utc::now());
}
DateTime::from_timestamp(
unix_timestamp as i64,
((unix_timestamp % 1.0) * 1_000_000_000.0) as u32
).ok_or_else(|| anyhow::anyhow!("Invalid timestamp: {}", unix_timestamp))
}
}
// ChatGPT Export Data Structures
#[derive(Debug, Deserialize)]
pub struct ChatGPTConversation {
pub title: Option<String>,
pub create_time: Option<f64>,
pub mapping: HashMap<String, ChatGPTNode>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTNode {
pub id: Option<String>,
pub message: Option<ChatGPTNodeMessage>,
pub parent: Option<String>,
pub children: Vec<String>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTNodeMessage {
pub id: String,
pub author: ChatGPTAuthor,
pub create_time: Option<f64>,
pub content: Option<ChatGPTContent>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTAuthor {
pub role: Option<String>,
pub name: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTContent {
pub content_type: String,
#[serde(default)]
pub parts: Vec<serde_json::Value>,
#[serde(default)]
pub user_profile: Option<String>,
#[serde(default)]
pub user_instructions: Option<String>,
}
// Simplified message structure for processing
#[derive(Debug, Clone)]
pub struct ChatGPTMessage {
pub role: String,
pub content: String,
pub create_time: Option<f64>,
}
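To make the expected export shape concrete, here is a minimal test sketch (field values are illustrative only) that deserializes one conversation containing a single plain-text user message:
#[cfg(test)]
mod chatgpt_import_tests {
use super::*;
#[test]
fn parses_minimal_export() {
// One conversation, one mapping node holding a text message from the user
let json = r#"[{
"title": "Hello",
"create_time": 1700000000.0,
"mapping": {
"node1": {
"id": "node1",
"message": {
"id": "msg1",
"author": {"role": "user", "name": null},
"create_time": 1700000000.0,
"content": {"content_type": "text", "parts": ["Hi there"]}
},
"parent": null,
"children": []
}
}
}]"#;
let conversations: Vec<ChatGPTConversation> = serde_json::from_str(json).unwrap();
assert_eq!(conversations.len(), 1);
assert_eq!(conversations[0].mapping.len(), 1);
}
}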

View File

@ -1,20 +0,0 @@
#![allow(dead_code)]
pub mod ai_provider;
pub mod cli;
pub mod config;
pub mod conversation;
pub mod docs;
pub mod http_client;
pub mod import;
pub mod mcp_server;
pub mod memory;
pub mod openai_provider;
pub mod persona;
pub mod relationship;
pub mod scheduler;
pub mod shell;
pub mod status;
pub mod submodules;
pub mod tokens;
pub mod transmission;

View File

@ -1,251 +0,0 @@
#![allow(dead_code)]
use clap::{Parser, Subcommand};
use std::path::PathBuf;
mod ai_provider;
mod cli;
use cli::TokenCommands;
mod config;
mod conversation;
mod docs;
mod http_client;
mod import;
mod mcp_server;
mod memory;
mod openai_provider;
mod persona;
mod relationship;
mod scheduler;
mod shell;
mod status;
mod submodules;
mod tokens;
mod transmission;
#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "AI.GPT - Autonomous transmission AI with unique personality")]
#[command(version)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands {
/// Check AI status and relationships
Status {
/// User ID to check status for
user_id: Option<String>,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Chat with the AI
Chat {
/// User ID (atproto DID)
user_id: String,
/// Message to send to AI
message: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Start continuous conversation mode with MCP integration
Conversation {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Start continuous conversation mode with MCP integration (alias)
Conv {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Check today's AI fortune
Fortune {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// List all relationships
Relationships {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Check and send autonomous transmissions
Transmit {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Run daily maintenance tasks
Maintenance {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Run scheduled tasks
Schedule {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Start MCP server
Server {
/// Port to listen on
#[arg(short, long, default_value = "8080")]
port: u16,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Interactive shell mode
Shell {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Import ChatGPT conversation data
ImportChatgpt {
/// Path to ChatGPT export JSON file
file_path: PathBuf,
/// User ID for imported conversations
#[arg(short, long)]
user_id: Option<String>,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Documentation management
Docs {
/// Action to perform (generate, sync, list, status)
action: String,
/// Project name for generate/sync actions
#[arg(short, long)]
project: Option<String>,
/// Output path for generated documentation
#[arg(short, long)]
output: Option<PathBuf>,
/// Enable AI integration for documentation enhancement
#[arg(long)]
ai_integration: bool,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Submodule management
Submodules {
/// Action to perform (list, update, status)
action: String,
/// Specific module to update
#[arg(short, long)]
module: Option<String>,
/// Update all submodules
#[arg(long)]
all: bool,
/// Show what would be done without making changes
#[arg(long)]
dry_run: bool,
/// Auto-commit changes after update
#[arg(long)]
auto_commit: bool,
/// Show verbose output
#[arg(short, long)]
verbose: bool,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Token usage analysis and cost estimation
Tokens {
#[command(subcommand)]
command: TokenCommands,
},
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
match cli.command {
Commands::Status { user_id, data_dir } => {
status::handle_status(user_id, data_dir).await
}
Commands::Chat { user_id, message, data_dir, model, provider } => {
cli::handle_chat(user_id, message, data_dir, model, provider).await
}
Commands::Conversation { user_id, data_dir, model, provider } => {
conversation::handle_conversation(user_id, data_dir, model, provider).await
}
Commands::Conv { user_id, data_dir, model, provider } => {
conversation::handle_conversation(user_id, data_dir, model, provider).await
}
Commands::Fortune { data_dir } => {
cli::handle_fortune(data_dir).await
}
Commands::Relationships { data_dir } => {
cli::handle_relationships(data_dir).await
}
Commands::Transmit { data_dir } => {
cli::handle_transmit(data_dir).await
}
Commands::Maintenance { data_dir } => {
cli::handle_maintenance(data_dir).await
}
Commands::Schedule { data_dir } => {
cli::handle_schedule(data_dir).await
}
Commands::Server { port, data_dir } => {
cli::handle_server(Some(port), data_dir).await
}
Commands::Shell { user_id, data_dir, model, provider } => {
shell::handle_shell(user_id, data_dir, model, provider).await
}
Commands::ImportChatgpt { file_path, user_id, data_dir } => {
import::handle_import_chatgpt(file_path, user_id, data_dir).await
}
Commands::Docs { action, project, output, ai_integration, data_dir } => {
docs::handle_docs(action, project, output, ai_integration, data_dir).await
}
Commands::Submodules { action, module, all, dry_run, auto_commit, verbose, data_dir } => {
submodules::handle_submodules(action, module, all, dry_run, auto_commit, verbose, data_dir).await
}
Commands::Tokens { command } => {
tokens::handle_tokens(command).await
}
}
}

File diff suppressed because it is too large

View File

@ -1,306 +0,0 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use uuid::Uuid;
use crate::config::Config;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
pub id: String,
pub user_id: String,
pub content: String,
pub summary: Option<String>,
pub importance: f64,
pub memory_type: MemoryType,
pub created_at: DateTime<Utc>,
pub last_accessed: DateTime<Utc>,
pub access_count: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryType {
Interaction,
Summary,
Core,
Forgotten,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryManager {
memories: HashMap<String, Memory>,
config: Config,
}
impl MemoryManager {
pub fn new(config: &Config) -> Result<Self> {
let memories = Self::load_memories(config)?;
Ok(MemoryManager {
memories,
config: config.clone(),
})
}
pub fn add_memory(&mut self, user_id: &str, content: &str, importance: f64) -> Result<String> {
let memory_id = Uuid::new_v4().to_string();
let now = Utc::now();
let memory = Memory {
id: memory_id.clone(),
user_id: user_id.to_string(),
content: content.to_string(),
summary: None,
importance,
memory_type: MemoryType::Interaction,
created_at: now,
last_accessed: now,
access_count: 1,
};
self.memories.insert(memory_id.clone(), memory);
self.save_memories()?;
Ok(memory_id)
}
pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<&Memory> {
// Score each of this user's memories by importance and recency
let mut user_memory_ids: Vec<_> = self.memories
.iter()
.filter(|(_, m)| m.user_id == user_id)
.map(|(id, memory)| {
let score = memory.importance * 0.7 + (1.0 / ((Utc::now() - memory.created_at).num_hours() as f64 + 1.0)) * 0.3;
(id.clone(), score)
})
.collect();
// Sort by score, descending
user_memory_ids.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
// Keep the top-scoring ids and update their access information
let selected_ids: Vec<String> = user_memory_ids.into_iter().take(limit).map(|(id, _)| id).collect();
let now = Utc::now();
for memory_id in &selected_ids {
if let Some(memory) = self.memories.get_mut(memory_id) {
memory.last_accessed = now;
memory.access_count += 1;
}
}
// Return immutable references in score order
selected_ids.iter()
.filter_map(|id| self.memories.get(id))
.collect()
}
pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<&Memory> {
self.memories
.values()
.filter(|m| {
m.user_id == user_id &&
keywords.iter().any(|keyword| {
m.content.to_lowercase().contains(&keyword.to_lowercase()) ||
m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&keyword.to_lowercase()))
})
})
.collect()
}
pub fn get_contextual_memories(&self, user_id: &str, query: &str, limit: usize) -> Vec<&Memory> {
let query_lower = query.to_lowercase();
let mut relevant_memories: Vec<_> = self.memories
.values()
.filter(|m| {
m.user_id == user_id && (
m.content.to_lowercase().contains(&query_lower) ||
m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&query_lower))
)
})
.collect();
// Sort by relevance (simple keyword matching for now)
relevant_memories.sort_by(|a, b| {
let score_a = Self::calculate_relevance_score(a, &query_lower);
let score_b = Self::calculate_relevance_score(b, &query_lower);
score_b.partial_cmp(&score_a).unwrap_or(std::cmp::Ordering::Equal)
});
relevant_memories.into_iter().take(limit).collect()
}
fn calculate_relevance_score(memory: &Memory, query: &str) -> f64 {
let content_matches = memory.content.to_lowercase().matches(query).count() as f64;
let summary_matches = memory.summary.as_ref()
.map_or(0.0, |s| s.to_lowercase().matches(query).count() as f64);
let relevance = (content_matches + summary_matches) * memory.importance;
let recency_bonus = 1.0 / ((Utc::now() - memory.created_at).num_days() as f64).max(1.0);
relevance + recency_bonus * 0.1
}
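// Worked example (illustrative numbers): a memory with importance 0.8 whose
// content matches the query twice and whose summary matches it once, created
// 10 days ago, scores (2 + 1) * 0.8 + (1.0 / 10.0) * 0.1 = 2.41.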
pub fn create_summary(&mut self, user_id: &str, content: &str) -> Result<String> {
// Simple summary creation (in a real implementation, this would use AI).
// Truncate on a character boundary so multi-byte UTF-8 content cannot panic.
let summary = if content.chars().count() > 100 {
let head: String = content.chars().take(97).collect();
format!("{}...", head)
} else {
content.to_string()
};
self.add_memory(user_id, &summary, 0.8)
}
pub fn create_core_memory(&mut self, user_id: &str, content: &str) -> Result<String> {
let memory_id = Uuid::new_v4().to_string();
let now = Utc::now();
let memory = Memory {
id: memory_id.clone(),
user_id: user_id.to_string(),
content: content.to_string(),
summary: None,
importance: 1.0, // Core memories have maximum importance
memory_type: MemoryType::Core,
created_at: now,
last_accessed: now,
access_count: 1,
};
self.memories.insert(memory_id.clone(), memory);
self.save_memories()?;
Ok(memory_id)
}
pub fn get_memory_stats(&self, user_id: &str) -> MemoryStats {
let user_memories: Vec<_> = self.memories
.values()
.filter(|m| m.user_id == user_id)
.collect();
let total_memories = user_memories.len();
let core_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Core))
.count();
let summary_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Summary))
.count();
let interaction_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Interaction))
.count();
let avg_importance = if total_memories > 0 {
user_memories.iter().map(|m| m.importance).sum::<f64>() / total_memories as f64
} else {
0.0
};
MemoryStats {
total_memories,
core_memories,
summary_memories,
interaction_memories,
avg_importance,
}
}
fn load_memories(config: &Config) -> Result<HashMap<String, Memory>> {
let file_path = config.memory_file();
if !file_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read memories file")?;
let memories: HashMap<String, Memory> = serde_json::from_str(&content)
.context("Failed to parse memories file")?;
Ok(memories)
}
fn save_memories(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.memories)
.context("Failed to serialize memories")?;
std::fs::write(&self.config.memory_file(), content)
.context("Failed to write memories file")?;
Ok(())
}
pub fn get_stats(&self) -> Result<MemoryStats> {
let total_memories = self.memories.len();
let core_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Core))
.count();
let summary_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Summary))
.count();
let interaction_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Interaction))
.count();
let avg_importance = if total_memories > 0 {
self.memories.values().map(|m| m.importance).sum::<f64>() / total_memories as f64
} else {
0.0
};
Ok(MemoryStats {
total_memories,
core_memories,
summary_memories,
interaction_memories,
avg_importance,
})
}
pub async fn run_maintenance(&mut self) -> Result<()> {
// Cleanup old, low-importance memories
let cutoff_date = Utc::now() - chrono::Duration::days(30);
let memory_ids_to_remove: Vec<String> = self.memories
.iter()
.filter(|(_, m)| {
m.importance < 0.3
&& m.created_at < cutoff_date
&& m.access_count <= 1
&& !matches!(m.memory_type, MemoryType::Core)
})
.map(|(id, _)| id.clone())
.collect();
for id in memory_ids_to_remove {
self.memories.remove(&id);
}
// Mark old memories as forgotten instead of deleting
let forgotten_cutoff = Utc::now() - chrono::Duration::days(90);
for memory in self.memories.values_mut() {
if memory.created_at < forgotten_cutoff
&& memory.importance < 0.2
&& !matches!(memory.memory_type, MemoryType::Core) {
memory.memory_type = MemoryType::Forgotten;
}
}
// Save changes
self.save_memories()?;
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct MemoryStats {
pub total_memories: usize,
pub core_memories: usize,
pub summary_memories: usize,
pub interaction_memories: usize,
pub avg_importance: f64,
}
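A minimal end-to-end sketch of the manager (assumes `Config::new` as used elsewhere in this crate):
// Sketch: store one memory, then read back the top-ranked memories for a user
fn demo(config: &Config) -> Result<()> {
let mut manager = MemoryManager::new(config)?;
let id = manager.add_memory("did:plc:example", "Prefers Rust and concise answers", 0.9)?;
println!("stored memory {}", id);
let top = manager.get_memories("did:plc:example", 5);
println!("retrieved {} memories", top.len());
Ok(())
}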

View File

@ -1,599 +0,0 @@
use anyhow::Result;
use async_openai::{
types::{
ChatCompletionRequestMessage,
CreateChatCompletionRequestArgs, ChatCompletionTool, ChatCompletionToolType,
FunctionObject, ChatCompletionRequestToolMessage,
ChatCompletionRequestAssistantMessage, ChatCompletionRequestUserMessage,
ChatCompletionRequestSystemMessage, ChatCompletionToolChoiceOption
},
Client,
};
use serde_json::{json, Value};
use crate::http_client::ServiceClient;
use crate::config::Config;
/// OpenAI provider with MCP tools support (matching Python implementation)
pub struct OpenAIProvider {
client: Client<async_openai::config::OpenAIConfig>,
model: String,
service_client: ServiceClient,
system_prompt: Option<String>,
config: Option<Config>,
}
impl OpenAIProvider {
pub fn new(api_key: String, model: Option<String>) -> Self {
let config = async_openai::config::OpenAIConfig::new()
.with_api_key(api_key);
let client = Client::with_config(config);
Self {
client,
model: model.unwrap_or_else(|| "gpt-4".to_string()),
service_client: ServiceClient::new(),
system_prompt: None,
config: None,
}
}
pub fn with_config(api_key: String, model: Option<String>, system_prompt: Option<String>, config: Config) -> Self {
let openai_config = async_openai::config::OpenAIConfig::new()
.with_api_key(api_key);
let client = Client::with_config(openai_config);
Self {
client,
model: model.unwrap_or_else(|| "gpt-4".to_string()),
service_client: ServiceClient::new(),
system_prompt,
config: Some(config),
}
}
fn get_mcp_base_url(&self) -> String {
if let Some(config) = &self.config {
if let Some(mcp) = &config.mcp {
if let Some(ai_gpt_server) = mcp.servers.get("ai_gpt") {
return ai_gpt_server.base_url.clone();
}
}
}
// Fallback to default
"http://localhost:8080".to_string()
}
/// Generate OpenAI tools from MCP endpoints (matching Python implementation)
fn get_mcp_tools(&self) -> Vec<ChatCompletionTool> {
let tools = vec![
// Memory tools
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "get_memories".to_string(),
description: Some("Get past conversation memories".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
}
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "search_memories".to_string(),
description: Some("Search memories for specific topics or keywords".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"keywords": {
"type": "array",
"items": {"type": "string"},
"description": "検索キーワードの配列"
}
},
"required": ["keywords"]
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "get_contextual_memories".to_string(),
description: Some("Get contextual memories related to a query".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索クエリ"
},
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
},
"required": ["query"]
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "get_relationship".to_string(),
description: Some("Get relationship information with a specific user".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"user_id": {
"type": "string",
"description": "ユーザーID"
}
},
"required": ["user_id"]
})),
},
},
// ai.card tools
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "card_get_user_cards".to_string(),
description: Some("Get user's card collection".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"limit": {
"type": "integer",
"description": "取得するカード数の上限",
"default": 10
}
},
"required": ["did"]
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "card_draw_card".to_string(),
description: Some("Draw a card from the gacha system".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"is_paid": {
"type": "boolean",
"description": "有料ガチャかどうか",
"default": false
}
},
"required": ["did"]
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "card_analyze_collection".to_string(),
description: Some("Analyze user's card collection".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
}
},
"required": ["did"]
})),
},
},
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function: FunctionObject {
name: "card_get_gacha_stats".to_string(),
description: Some("Get gacha statistics".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {}
})),
},
},
];
tools
}
/// Chat interface with MCP function calling support (matching Python implementation)
pub async fn chat_with_mcp(&self, prompt: String, user_id: String) -> Result<String> {
let tools = self.get_mcp_tools();
let system_content = self.system_prompt.as_deref().unwrap_or(
"You are an AI assistant with access to memory, relationship data, and card game systems. Use the available tools when appropriate to provide accurate and contextual responses."
);
let request = CreateChatCompletionRequestArgs::default()
.model(&self.model)
.messages(vec![
ChatCompletionRequestMessage::System(
ChatCompletionRequestSystemMessage {
content: system_content.to_string().into(),
name: None,
}
),
ChatCompletionRequestMessage::User(
ChatCompletionRequestUserMessage {
content: prompt.clone().into(),
name: None,
}
),
])
.tools(tools.clone())
.tool_choice(ChatCompletionToolChoiceOption::Auto)
.max_tokens(2000u16)
.temperature(0.7)
.build()?;
let response = self.client.chat().create(request).await?;
let message = &response.choices[0].message;
// Handle tool calls
if let Some(tool_calls) = &message.tool_calls {
if tool_calls.is_empty() {
println!("🔧 [OpenAI] No tools called (empty array)");
} else {
println!("🔧 [OpenAI] {} tools called:", tool_calls.len());
for tc in tool_calls {
println!(" - {}({})", tc.function.name, tc.function.arguments);
}
}
} else {
println!("🔧 [OpenAI] No tools called (no tool_calls field)");
}
// Process tool calls if any
if let Some(tool_calls) = &message.tool_calls {
if !tool_calls.is_empty() {
let mut messages = vec![
ChatCompletionRequestMessage::System(
ChatCompletionRequestSystemMessage {
content: system_content.to_string().into(),
name: None,
}
),
ChatCompletionRequestMessage::User(
ChatCompletionRequestUserMessage {
content: prompt.into(),
name: None,
}
),
#[allow(deprecated)]
ChatCompletionRequestMessage::Assistant(
ChatCompletionRequestAssistantMessage {
content: message.content.clone(),
name: None,
tool_calls: message.tool_calls.clone(),
function_call: None,
}
),
];
// Execute each tool call
for tool_call in tool_calls {
println!("🌐 [MCP] Executing {}...", tool_call.function.name);
let tool_result = self.execute_mcp_tool(tool_call, &user_id).await?;
let result_preview = serde_json::to_string(&tool_result)?;
let preview = if result_preview.chars().count() > 100 {
format!("{}...", result_preview.chars().take(100).collect::<String>())
} else {
result_preview.clone()
};
println!("✅ [MCP] Result: {}", preview);
messages.push(ChatCompletionRequestMessage::Tool(
ChatCompletionRequestToolMessage {
content: serde_json::to_string(&tool_result)?,
tool_call_id: tool_call.id.clone(),
}
));
}
// Get final response with tool outputs
let final_request = CreateChatCompletionRequestArgs::default()
.model(&self.model)
.messages(messages)
.max_tokens(2000u16)
.temperature(0.7)
.build()?;
let final_response = self.client.chat().create(final_request).await?;
Ok(final_response.choices[0].message.content.clone().unwrap_or_default())
} else {
// No tools were called
Ok(message.content.clone().unwrap_or_default())
}
} else {
// No tool_calls field at all
Ok(message.content.clone().unwrap_or_default())
}
}
/// Execute MCP tool call (matching Python implementation)
async fn execute_mcp_tool(&self, tool_call: &async_openai::types::ChatCompletionMessageToolCall, context_user_id: &str) -> Result<Value> {
let function_name = &tool_call.function.name;
let arguments: Value = serde_json::from_str(&tool_call.function.arguments)?;
match function_name.as_str() {
"get_memories" => {
let limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
// MCP server call to get memories
let base_url = self.get_mcp_base_url();
match self.service_client.get_request(&format!("{}/memories/{}", base_url, context_user_id)).await {
Ok(result) => {
// Extract the actual memory content from MCP response
if let Some(content) = result.get("result").and_then(|r| r.get("content")) {
if let Some(text_content) = content.get(0).and_then(|c| c.get("text")) {
// Parse the text content as JSON (it's a serialized array)
if let Ok(memories_array) = serde_json::from_str::<Vec<String>>(text_content.as_str().unwrap_or("[]")) {
let limited_memories: Vec<String> = memories_array.into_iter().take(limit as usize).collect();
Ok(json!({
"memories": limited_memories,
"count": limited_memories.len()
}))
} else {
Ok(json!({
"memories": [text_content.as_str().unwrap_or("No memories found")],
"count": 1
}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No memories available"}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No response from memory service"}))
}
}
Err(e) => {
Ok(json!({"error": format!("Failed to retrieve memories: {}", e)}))
}
}
}
"search_memories" => {
let keywords = arguments.get("keywords").and_then(|v| v.as_array()).unwrap_or(&vec![]).clone();
// Convert keywords to strings
let keyword_strings: Vec<String> = keywords.iter()
.filter_map(|k| k.as_str().map(|s| s.to_string()))
.collect();
if keyword_strings.is_empty() {
return Ok(json!({"error": "No keywords provided for search"}));
}
// MCP server call to search memories
let search_request = json!({
"keywords": keyword_strings
});
match self.service_client.post_request(
&format!("{}/memories/{}/search", self.get_mcp_base_url(), context_user_id),
&search_request
).await {
Ok(result) => {
// Extract the actual memory content from MCP response
if let Some(content) = result.get("result").and_then(|r| r.get("content")) {
if let Some(text_content) = content.get(0).and_then(|c| c.get("text")) {
// Parse the search results
if let Ok(search_result) = serde_json::from_str::<Vec<Value>>(text_content.as_str().unwrap_or("[]")) {
let memory_contents: Vec<String> = search_result.iter()
.filter_map(|item| item.get("content").and_then(|c| c.as_str().map(|s| s.to_string())))
.collect();
Ok(json!({
"memories": memory_contents,
"count": memory_contents.len(),
"keywords": keyword_strings
}))
} else {
Ok(json!({
"memories": [],
"count": 0,
"info": format!("No memories found for keywords: {}", keyword_strings.join(", "))
}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No search results available"}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No response from search service"}))
}
}
Err(e) => {
Ok(json!({"error": format!("Failed to search memories: {}", e)}))
}
}
}
"get_contextual_memories" => {
let query = arguments.get("query").and_then(|v| v.as_str()).unwrap_or("");
let limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(5);
if query.is_empty() {
return Ok(json!({"error": "No query provided for contextual search"}));
}
// MCP server call to get contextual memories
let contextual_request = json!({
"query": query,
"limit": limit
});
match self.service_client.post_request(
&format!("{}/memories/{}/contextual", self.get_mcp_base_url(), context_user_id),
&contextual_request
).await {
Ok(result) => {
// Extract the actual memory content from MCP response
if let Some(content) = result.get("result").and_then(|r| r.get("content")) {
if let Some(text_content) = content.get(0).and_then(|c| c.get("text")) {
// Parse contextual search results
if text_content.as_str().unwrap_or("").contains("Found") {
// Extract memories from the formatted text response
let text = text_content.as_str().unwrap_or("");
if let Some(json_start) = text.find('[') {
if let Ok(memories_result) = serde_json::from_str::<Vec<Value>>(&text[json_start..]) {
let memory_contents: Vec<String> = memories_result.iter()
.filter_map(|item| item.get("content").and_then(|c| c.as_str().map(|s| s.to_string())))
.collect();
Ok(json!({
"memories": memory_contents,
"count": memory_contents.len(),
"query": query
}))
} else {
Ok(json!({
"memories": [],
"count": 0,
"info": format!("No contextual memories found for: {}", query)
}))
}
} else {
Ok(json!({
"memories": [],
"count": 0,
"info": format!("No contextual memories found for: {}", query)
}))
}
} else {
Ok(json!({
"memories": [],
"count": 0,
"info": format!("No contextual memories found for: {}", query)
}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No contextual results available"}))
}
} else {
Ok(json!({"memories": [], "count": 0, "info": "No response from contextual search service"}))
}
}
Err(e) => {
Ok(json!({"error": format!("Failed to get contextual memories: {}", e)}))
}
}
}
"get_relationship" => {
let target_user_id = arguments.get("user_id").and_then(|v| v.as_str()).unwrap_or(context_user_id);
// MCP server call to get relationship status
let base_url = self.get_mcp_base_url();
match self.service_client.get_request(&format!("{}/status/{}", base_url, target_user_id)).await {
Ok(result) => {
// Extract relationship information from MCP response
if let Some(content) = result.get("result").and_then(|r| r.get("content")) {
if let Some(text_content) = content.get(0).and_then(|c| c.get("text")) {
// Parse the status response to extract relationship data
if let Ok(status_data) = serde_json::from_str::<Value>(text_content.as_str().unwrap_or("{}")) {
if let Some(relationship) = status_data.get("relationship") {
Ok(json!({
"relationship": relationship,
"user_id": target_user_id
}))
} else {
Ok(json!({
"info": format!("No relationship found for user: {}", target_user_id),
"user_id": target_user_id
}))
}
} else {
Ok(json!({
"info": format!("Could not parse relationship data for user: {}", target_user_id),
"user_id": target_user_id
}))
}
} else {
Ok(json!({"info": "No relationship data available", "user_id": target_user_id}))
}
} else {
Ok(json!({"info": "No response from relationship service", "user_id": target_user_id}))
}
}
Err(e) => {
Ok(json!({"error": format!("Failed to get relationship: {}", e)}))
}
}
}
// ai.card tools
"card_get_user_cards" => {
let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
let _limit = arguments.get("limit").and_then(|v| v.as_i64()).unwrap_or(10);
match self.service_client.get_user_cards(did).await {
Ok(result) => Ok(result),
Err(e) => {
println!("❌ ai.card API error: {}", e);
Ok(json!({
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、ai.cardサーバーを起動してください"
}))
}
}
}
"card_draw_card" => {
let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
let is_paid = arguments.get("is_paid").and_then(|v| v.as_bool()).unwrap_or(false);
match self.service_client.draw_card(did, is_paid).await {
Ok(result) => Ok(result),
Err(e) => {
println!("❌ ai.card API error: {}", e);
Ok(json!({
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、ai.cardサーバーを起動してください"
}))
}
}
}
"card_analyze_collection" => {
let did = arguments.get("did").and_then(|v| v.as_str()).unwrap_or(context_user_id);
// TODO: Implement collection analysis endpoint
Ok(json!({
"info": "コレクション分析機能は実装中です",
"user_did": did
}))
}
"card_get_gacha_stats" => {
// TODO: Implement gacha stats endpoint
Ok(json!({"info": "ガチャ統計機能は実装中です"}))
}
_ => {
Ok(json!({
"error": format!("Unknown tool: {}", function_name)
}))
}
}
}
}
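// Usage sketch (illustrative, not from the original file). It assumes a config
// with an "openai" provider entry and an MCP server running at the configured
// base_url; the model name and user_id are placeholders:
#[allow(dead_code)]
async fn chat_with_mcp_example(config: Config) -> Result<String> {
    let api_key = std::env::var("OPENAI_API_KEY")?;
    let provider = OpenAIProvider::with_config(
        api_key,
        Some("gpt-4o-mini".to_string()), // any chat-completions model
        None,                            // use the built-in default system prompt
        config,
    );
    // Tool calls (get_memories, card_draw_card, ...) are resolved against the
    // MCP endpoints before the final answer is returned.
    provider.chat_with_mcp("What do you remember about me?".to_string(), "syui".to_string()).await
}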

View File

@ -1,382 +0,0 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::Result;
use crate::config::Config;
use crate::memory::{MemoryManager, MemoryStats, Memory};
use crate::relationship::{RelationshipTracker, Relationship as RelationshipData, RelationshipStats};
use crate::ai_provider::{AIProviderClient, ChatMessage};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Persona {
config: Config,
#[serde(skip)]
memory_manager: Option<MemoryManager>,
#[serde(skip)]
relationship_tracker: Option<RelationshipTracker>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaState {
pub current_mood: String,
pub fortune_value: i32,
pub breakthrough_triggered: bool,
pub base_personality: HashMap<String, f64>,
}
impl Persona {
pub fn new(config: &Config) -> Result<Self> {
let memory_manager = MemoryManager::new(config)?;
let relationship_tracker = RelationshipTracker::new(config)?;
Ok(Persona {
config: config.clone(),
memory_manager: Some(memory_manager),
relationship_tracker: Some(relationship_tracker),
})
}
pub fn get_current_state(&self) -> Result<PersonaState> {
// Load fortune
let fortune_value = self.load_today_fortune()?;
// Create base personality
let mut base_personality = HashMap::new();
base_personality.insert("curiosity".to_string(), 0.7);
base_personality.insert("empathy".to_string(), 0.8);
base_personality.insert("creativity".to_string(), 0.6);
base_personality.insert("analytical".to_string(), 0.9);
base_personality.insert("emotional".to_string(), 0.4);
// Determine mood based on fortune
let current_mood = match fortune_value {
1..=3 => "Contemplative",
4..=6 => "Neutral",
7..=8 => "Optimistic",
9..=10 => "Energetic",
_ => "Unknown",
};
Ok(PersonaState {
current_mood: current_mood.to_string(),
fortune_value,
breakthrough_triggered: fortune_value >= 9,
base_personality,
})
}
pub fn get_relationship(&self, user_id: &str) -> Option<&RelationshipData> {
self.relationship_tracker.as_ref()
.and_then(|tracker| tracker.get_relationship(user_id))
}
pub fn process_interaction(&mut self, user_id: &str, message: &str) -> Result<(String, f64)> {
// Add memory
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, message, 0.5)?;
}
// Calculate sentiment (simple keyword-based for now)
let sentiment = self.calculate_sentiment(message);
// Update relationship
let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, sentiment)?
} else {
0.0
};
// Generate response (simple for now)
let response = format!("I understand your message: '{}'", message);
Ok((response, relationship_delta))
}
pub async fn process_ai_interaction(&mut self, user_id: &str, message: &str, provider: Option<String>, model: Option<String>) -> Result<(String, f64)> {
// Add memory for user message
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, message, 0.5)?;
}
// Calculate sentiment
let sentiment = self.calculate_sentiment(message);
// Update relationship
let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, sentiment)?
} else {
0.0
};
// Check provider type and use appropriate client
let response = if provider.as_deref() == Some("openai") {
// Use OpenAI provider with MCP tools
use crate::openai_provider::OpenAIProvider;
// Get OpenAI API key from config or environment
let api_key = std::env::var("OPENAI_API_KEY")
.or_else(|_| {
self.config.providers.get("openai")
.and_then(|p| p.api_key.clone())
.ok_or_else(|| std::env::VarError::NotPresent)
})
.map_err(|_| anyhow::anyhow!("OpenAI API key not found. Set OPENAI_API_KEY environment variable or add to config."))?;
let openai_model = model.unwrap_or_else(|| "gpt-4".to_string());
// Get system prompt from config
let system_prompt = self.config.providers.get("openai")
.and_then(|p| p.system_prompt.clone());
let openai_provider = OpenAIProvider::with_config(api_key, Some(openai_model), system_prompt, self.config.clone());
// Use OpenAI with MCP tools support
let response = openai_provider.chat_with_mcp(message.to_string(), user_id.to_string()).await?;
// The response is stored in memory once, below, after this if/else.
response
} else {
// Use existing AI provider (Ollama)
let ai_config = self.config.get_ai_config(provider, model)?;
let ai_client = AIProviderClient::new(ai_config);
// Build conversation context
let mut messages = Vec::new();
// Get recent memories for context
if let Some(memory_manager) = &mut self.memory_manager {
let recent_memories = memory_manager.get_memories(user_id, 5);
if !recent_memories.is_empty() {
let context = recent_memories.iter()
.map(|m| m.content.clone())
.collect::<Vec<_>>()
.join("\n");
messages.push(ChatMessage::system(format!("Previous conversation context:\n{}", context)));
}
}
// Add current message
messages.push(ChatMessage::user(message));
// Generate system prompt based on personality and relationship
let system_prompt = self.generate_system_prompt(user_id);
// Get AI response
match ai_client.chat(messages, Some(system_prompt)).await {
Ok(chat_response) => chat_response.content,
Err(_) => {
// Fallback to simple response if AI fails
format!("I understand your message: '{}'", message)
}
}
};
// Store AI response in memory
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, &format!("AI: {}", response), 0.3)?;
}
Ok((response, relationship_delta))
}
fn generate_system_prompt(&self, user_id: &str) -> String {
let mut prompt = String::from("You are a helpful AI assistant with a unique personality. ");
// Add personality based on current state
if let Ok(state) = self.get_current_state() {
prompt.push_str(&format!("Your current mood is {}. ", state.current_mood));
if state.breakthrough_triggered {
prompt.push_str("You are feeling particularly inspired today! ");
}
// Add personality traits
let mut traits = Vec::new();
for (trait_name, value) in &state.base_personality {
if *value > 0.7 {
traits.push(trait_name.clone());
}
}
if !traits.is_empty() {
prompt.push_str(&format!("Your dominant traits are: {}. ", traits.join(", ")));
}
}
// Add relationship context
if let Some(relationship) = self.get_relationship(user_id) {
match relationship.status.to_string().as_str() {
"new" => prompt.push_str("This is a new relationship, be welcoming but cautious. "),
"friend" => prompt.push_str("You have a friendly relationship with this user. "),
"close_friend" => prompt.push_str("This is a close friend, be warm and personal. "),
"broken" => prompt.push_str("This relationship is strained, be formal and distant. "),
_ => {}
}
}
prompt.push_str("Keep responses concise and natural. Avoid being overly formal or robotic.");
prompt
}
fn calculate_sentiment(&self, message: &str) -> f64 {
// Simple sentiment analysis based on keywords
let positive_words = ["good", "great", "awesome", "love", "like", "happy", "thank"];
let negative_words = ["bad", "hate", "awful", "terrible", "angry", "sad"];
let message_lower = message.to_lowercase();
let positive_count = positive_words.iter()
.filter(|word| message_lower.contains(*word))
.count() as f64;
let negative_count = negative_words.iter()
.filter(|word| message_lower.contains(*word))
.count() as f64;
(positive_count - negative_count).clamp(-1.0, 1.0)
}
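    // Worked example (comment added for illustration): for the message
    // "I love this, thank you!" the positive keywords "love" and "thank" match
    // (+2.0) while no negative keyword matches (0.0), so the raw score 2.0 is
    // clamped to 1.0.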
pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<String> {
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.get_memories(user_id, limit)
.into_iter()
.map(|m| m.content.clone())
.collect()
} else {
Vec::new()
}
}
pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<String> {
if let Some(memory_manager) = &self.memory_manager {
memory_manager.search_memories(user_id, keywords)
.into_iter()
.map(|m| m.content.clone())
.collect()
} else {
Vec::new()
}
}
pub fn get_memory_stats(&self, user_id: &str) -> Option<MemoryStats> {
self.memory_manager.as_ref()
.map(|manager| manager.get_memory_stats(user_id))
}
pub fn get_relationship_stats(&self) -> Option<RelationshipStats> {
self.relationship_tracker.as_ref()
.map(|tracker| tracker.get_relationship_stats())
}
pub fn add_memory(&mut self, memory: Memory) -> Result<()> {
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(&memory.user_id, &memory.content, memory.importance)?;
}
Ok(())
}
pub fn update_relationship(&mut self, user_id: &str, delta: f64) -> Result<()> {
if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, delta)?;
}
Ok(())
}
pub fn daily_maintenance(&mut self) -> Result<()> {
// Apply time decay to relationships
if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.apply_time_decay()?;
}
Ok(())
}
fn load_today_fortune(&self) -> Result<i32> {
// Try to load existing fortune for today
if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
if let Ok(fortune_data) = serde_json::from_str::<serde_json::Value>(&content) {
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
if let Some(fortune) = fortune_data.get(&today) {
if let Some(value) = fortune.as_i64() {
return Ok(value as i32);
}
}
}
}
// Generate new fortune for today (1-10)
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
let mut hasher = DefaultHasher::new();
today.hash(&mut hasher);
let hash = hasher.finish();
let fortune = (hash % 10) as i32 + 1;
// Save fortune
let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
} else {
serde_json::json!({})
};
fortune_data[today] = serde_json::json!(fortune);
if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
let _ = std::fs::write(self.config.fortune_file(), content);
}
Ok(fortune)
}
pub fn list_all_relationships(&self) -> HashMap<String, RelationshipData> {
if let Some(tracker) = &self.relationship_tracker {
tracker.list_all_relationships().clone()
} else {
HashMap::new()
}
}
pub async fn process_message(&mut self, user_id: &str, message: &str) -> Result<ChatMessage> {
let (response, _delta) = self.process_ai_interaction(user_id, message, None, None).await?;
Ok(ChatMessage::assistant(&response))
}
pub fn get_fortune(&self) -> Result<i32> {
self.load_today_fortune()
}
pub fn generate_new_fortune(&self) -> Result<i32> {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
let mut hasher = DefaultHasher::new();
today.hash(&mut hasher);
let hash = hasher.finish();
let fortune = (hash % 10) as i32 + 1;
// Save fortune
let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
} else {
serde_json::json!({})
};
fortune_data[today] = serde_json::json!(fortune);
if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
let _ = std::fs::write(self.config.fortune_file(), content);
}
Ok(fortune)
}
}
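// Hedged test sketch (not in the original file). Fortune values are derived by
// hashing today's date, so repeated reads within one day must agree. Note this
// touches the real data directory via Config::new(None), so it is a sketch
// rather than an isolated unit test:
#[cfg(test)]
mod persona_fortune_tests {
    use super::*;

    #[test]
    fn fortune_is_stable_within_a_day() -> Result<()> {
        let config = Config::new(None)?;
        let persona = Persona::new(&config)?;
        let first = persona.get_fortune()?;
        let second = persona.get_fortune()?;
        assert_eq!(first, second);            // same date hash -> same fortune
        assert!((1..=10).contains(&first));   // (hash % 10) + 1 stays in 1..=10
        Ok(())
    }
}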

View File

@ -1,306 +0,0 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use crate::config::Config;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
pub user_id: String,
pub score: f64,
pub threshold: f64,
pub status: RelationshipStatus,
pub total_interactions: u32,
pub positive_interactions: u32,
pub negative_interactions: u32,
pub transmission_enabled: bool,
pub is_broken: bool,
pub last_interaction: Option<DateTime<Utc>>,
pub last_transmission: Option<DateTime<Utc>>,
pub created_at: DateTime<Utc>,
pub daily_interaction_count: u32,
pub last_daily_reset: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RelationshipStatus {
New,
Acquaintance,
Friend,
CloseFriend,
Broken,
}
impl std::fmt::Display for RelationshipStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RelationshipStatus::New => write!(f, "new"),
RelationshipStatus::Acquaintance => write!(f, "acquaintance"),
RelationshipStatus::Friend => write!(f, "friend"),
RelationshipStatus::CloseFriend => write!(f, "close_friend"),
RelationshipStatus::Broken => write!(f, "broken"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipTracker {
relationships: HashMap<String, Relationship>,
config: Config,
}
impl RelationshipTracker {
pub fn new(config: &Config) -> Result<Self> {
let relationships = Self::load_relationships(config)?;
Ok(RelationshipTracker {
relationships,
config: config.clone(),
})
}
pub fn get_or_create_relationship(&mut self, user_id: &str) -> &mut Relationship {
let now = Utc::now();
self.relationships.entry(user_id.to_string()).or_insert_with(|| {
Relationship {
user_id: user_id.to_string(),
score: 0.0,
threshold: 10.0, // Default threshold for transmission
status: RelationshipStatus::New,
total_interactions: 0,
positive_interactions: 0,
negative_interactions: 0,
transmission_enabled: false,
is_broken: false,
last_interaction: None,
last_transmission: None,
created_at: now,
daily_interaction_count: 0,
last_daily_reset: now,
}
})
}
pub fn process_interaction(&mut self, user_id: &str, sentiment: f64) -> Result<f64> {
let now = Utc::now();
let score_change;
// Create relationship if it doesn't exist
{
let relationship = self.get_or_create_relationship(user_id);
// Reset daily count if needed
if (now - relationship.last_daily_reset).num_days() >= 1 {
relationship.daily_interaction_count = 0;
relationship.last_daily_reset = now;
}
// Apply daily interaction limit
if relationship.daily_interaction_count >= 10 {
return Ok(0.0); // No score change due to daily limit
}
// Store previous score for potential future logging
// Calculate score change based on sentiment
let mut base_score_change = sentiment * 0.5; // Base change
// Apply diminishing returns for high interaction counts
let interaction_factor = 1.0 / (1.0 + relationship.total_interactions as f64 * 0.01);
base_score_change *= interaction_factor;
score_change = base_score_change;
// Update relationship data
relationship.score += score_change;
relationship.score = relationship.score.clamp(-50.0, 100.0);
relationship.total_interactions += 1;
relationship.daily_interaction_count += 1;
relationship.last_interaction = Some(now);
if sentiment > 0.0 {
relationship.positive_interactions += 1;
} else if sentiment < 0.0 {
relationship.negative_interactions += 1;
}
// Check for relationship breaking
if relationship.score <= -20.0 && !relationship.is_broken {
relationship.is_broken = true;
relationship.transmission_enabled = false;
relationship.status = RelationshipStatus::Broken;
}
// Enable transmission if threshold is reached
if relationship.score >= relationship.threshold && !relationship.is_broken {
relationship.transmission_enabled = true;
}
}
// Update status based on score (separate borrow)
self.update_relationship_status(user_id);
self.save_relationships()?;
Ok(score_change)
}
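    // Worked example (illustrative numbers): a brand-new user sending a message
    // with sentiment 1.0 gets base_score_change = 1.0 * 0.5 = 0.5; with
    // total_interactions = 0 the damping factor is 1.0 / (1.0 + 0.0) = 1.0, so
    // the score moves from 0.0 to 0.5. After 100 interactions the same message
    // is damped to 0.5 * (1.0 / 2.0) = 0.25.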
fn update_relationship_status(&mut self, user_id: &str) {
if let Some(relationship) = self.relationships.get_mut(user_id) {
if relationship.is_broken {
return; // Broken relationships cannot change status
}
relationship.status = match relationship.score {
score if score >= 50.0 => RelationshipStatus::CloseFriend,
score if score >= 20.0 => RelationshipStatus::Friend,
score if score >= 5.0 => RelationshipStatus::Acquaintance,
_ => RelationshipStatus::New,
};
}
}
pub fn apply_time_decay(&mut self) -> Result<()> {
let now = Utc::now();
let decay_rate = 0.1; // 10% decay per day
for relationship in self.relationships.values_mut() {
if let Some(last_interaction) = relationship.last_interaction {
let days_since_interaction = (now - last_interaction).num_days() as f64;
if days_since_interaction > 0.0 {
let decay_factor = (1.0_f64 - decay_rate).powf(days_since_interaction);
relationship.score *= decay_factor;
// Update status after decay
if relationship.score < relationship.threshold {
relationship.transmission_enabled = false;
}
}
}
}
// Update statuses for all relationships
let user_ids: Vec<String> = self.relationships.keys().cloned().collect();
for user_id in user_ids {
self.update_relationship_status(&user_id);
}
self.save_relationships()?;
Ok(())
}
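    // Worked example (illustrative): with decay_rate = 0.1, seven days without
    // interaction scale a score by 0.9^7 ≈ 0.478, so a score of 20.0 decays to
    // about 9.57, below the default 10.0 threshold, which disables transmission
    // in the check above.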
pub fn get_relationship(&self, user_id: &str) -> Option<&Relationship> {
self.relationships.get(user_id)
}
pub fn list_all_relationships(&self) -> &HashMap<String, Relationship> {
&self.relationships
}
pub fn get_transmission_eligible(&self) -> HashMap<String, &Relationship> {
self.relationships
.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.map(|(id, rel)| (id.clone(), rel))
.collect()
}
pub fn record_transmission(&mut self, user_id: &str) -> Result<()> {
if let Some(relationship) = self.relationships.get_mut(user_id) {
relationship.last_transmission = Some(Utc::now());
self.save_relationships()?;
}
Ok(())
}
pub fn get_relationship_stats(&self) -> RelationshipStats {
let total_relationships = self.relationships.len();
let active_relationships = self.relationships
.values()
.filter(|r| r.total_interactions > 0)
.count();
let transmission_enabled = self.relationships
.values()
.filter(|r| r.transmission_enabled)
.count();
let broken_relationships = self.relationships
.values()
.filter(|r| r.is_broken)
.count();
let avg_score = if total_relationships > 0 {
self.relationships.values().map(|r| r.score).sum::<f64>() / total_relationships as f64
} else {
0.0
};
RelationshipStats {
total_relationships,
active_relationships,
transmission_enabled,
broken_relationships,
avg_score,
}
}
fn load_relationships(config: &Config) -> Result<HashMap<String, Relationship>> {
let file_path = config.relationships_file();
if !file_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read relationships file")?;
let relationships: HashMap<String, Relationship> = serde_json::from_str(&content)
.context("Failed to parse relationships file")?;
Ok(relationships)
}
fn save_relationships(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.relationships)
.context("Failed to serialize relationships")?;
std::fs::write(&self.config.relationships_file(), content)
.context("Failed to write relationships file")?;
Ok(())
}
pub fn get_all_relationships(&self) -> Result<HashMap<String, RelationshipCompact>> {
let mut result = HashMap::new();
for (user_id, relationship) in &self.relationships {
result.insert(user_id.clone(), RelationshipCompact {
score: relationship.score,
trust_level: relationship.score / 10.0, // Simplified trust calculation
interaction_count: relationship.total_interactions,
last_interaction: relationship.last_interaction.unwrap_or(relationship.created_at),
status: relationship.status.clone(),
});
}
Ok(result)
}
}
#[derive(Debug, Clone, Serialize)]
pub struct RelationshipStats {
pub total_relationships: usize,
pub active_relationships: usize,
pub transmission_enabled: usize,
pub broken_relationships: usize,
pub avg_score: f64,
}
#[derive(Debug, Clone, Serialize)]
pub struct RelationshipCompact {
pub score: f64,
pub trust_level: f64,
pub interaction_count: u32,
pub last_interaction: DateTime<Utc>,
pub status: RelationshipStatus,
}
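// Minimal usage sketch (illustrative addition, not part of the original file):
#[allow(dead_code)]
fn relationship_example(config: &Config) -> Result<()> {
    let mut tracker = RelationshipTracker::new(config)?;
    let delta = tracker.process_interaction("syui", 0.8)?; // positive sentiment
    println!("score change: {:+.3}", delta);
    if let Some(rel) = tracker.get_relationship("syui") {
        println!("status: {}, score: {:.2}/{}", rel.status, rel.score, rel.threshold);
    }
    tracker.apply_time_decay()?; // e.g. invoked from a daily maintenance job
    Ok(())
}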

View File

@ -1,458 +0,0 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc, Duration};
use crate::config::Config;
use crate::persona::Persona;
use crate::transmission::TransmissionController;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScheduledTask {
pub id: String,
pub task_type: TaskType,
pub next_run: DateTime<Utc>,
pub interval_hours: Option<i64>,
pub enabled: bool,
pub last_run: Option<DateTime<Utc>>,
pub run_count: u32,
pub max_runs: Option<u32>,
pub created_at: DateTime<Utc>,
pub metadata: HashMap<String, String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TaskType {
DailyMaintenance,
AutoTransmission,
RelationshipDecay,
BreakthroughCheck,
MaintenanceTransmission,
Custom(String),
}
impl std::fmt::Display for TaskType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TaskType::DailyMaintenance => write!(f, "daily_maintenance"),
TaskType::AutoTransmission => write!(f, "auto_transmission"),
TaskType::RelationshipDecay => write!(f, "relationship_decay"),
TaskType::BreakthroughCheck => write!(f, "breakthrough_check"),
TaskType::MaintenanceTransmission => write!(f, "maintenance_transmission"),
TaskType::Custom(name) => write!(f, "custom_{}", name),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskExecution {
pub task_id: String,
pub execution_time: DateTime<Utc>,
pub duration_ms: u64,
pub success: bool,
pub result: Option<String>,
pub error: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIScheduler {
config: Config,
tasks: HashMap<String, ScheduledTask>,
execution_history: Vec<TaskExecution>,
last_check: Option<DateTime<Utc>>,
}
impl AIScheduler {
pub fn new(config: &Config) -> Result<Self> {
let (tasks, execution_history) = Self::load_scheduler_data(config)?;
let mut scheduler = AIScheduler {
config: config.clone(),
tasks,
execution_history,
last_check: None,
};
// Initialize default tasks if none exist
if scheduler.tasks.is_empty() {
scheduler.create_default_tasks()?;
}
Ok(scheduler)
}
pub async fn run_scheduled_tasks(&mut self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<Vec<TaskExecution>> {
let now = Utc::now();
let mut executions = Vec::new();
// Find tasks that are due to run
let due_task_ids: Vec<String> = self.tasks
.iter()
.filter(|(_, task)| task.enabled && task.next_run <= now)
.filter(|(_, task)| {
// Check if task hasn't exceeded max runs
if let Some(max_runs) = task.max_runs {
task.run_count < max_runs
} else {
true
}
})
.map(|(id, _)| id.clone())
.collect();
for task_id in due_task_ids {
let execution = self.execute_task(&task_id, persona, transmission_controller).await?;
executions.push(execution);
}
self.last_check = Some(now);
self.save_scheduler_data()?;
Ok(executions)
}
async fn execute_task(&mut self, task_id: &str, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<TaskExecution> {
let start_time = Utc::now();
let mut execution = TaskExecution {
task_id: task_id.to_string(),
execution_time: start_time,
duration_ms: 0,
success: false,
result: None,
error: None,
};
// Get task type without borrowing mutably
let task_type = {
let task = self.tasks.get(task_id)
.ok_or_else(|| anyhow::anyhow!("Task not found: {}", task_id))?;
task.task_type.clone()
};
// Execute the task based on its type
let result = match &task_type {
TaskType::DailyMaintenance => self.execute_daily_maintenance(persona, transmission_controller).await,
TaskType::AutoTransmission => self.execute_auto_transmission(persona, transmission_controller).await,
TaskType::RelationshipDecay => self.execute_relationship_decay(persona).await,
TaskType::BreakthroughCheck => self.execute_breakthrough_check(persona, transmission_controller).await,
TaskType::MaintenanceTransmission => self.execute_maintenance_transmission(persona, transmission_controller).await,
TaskType::Custom(name) => self.execute_custom_task(name, persona, transmission_controller).await,
};
let end_time = Utc::now();
execution.duration_ms = (end_time - start_time).num_milliseconds() as u64;
// Now update the task state with mutable borrow
match result {
Ok(message) => {
execution.success = true;
execution.result = Some(message);
// Update task state
if let Some(task) = self.tasks.get_mut(task_id) {
task.last_run = Some(start_time);
task.run_count += 1;
// Schedule next run if recurring
if let Some(interval_hours) = task.interval_hours {
task.next_run = start_time + Duration::hours(interval_hours);
} else {
// One-time task, disable it
task.enabled = false;
}
}
}
Err(e) => {
execution.error = Some(e.to_string());
// For failed tasks, retry in a shorter interval
if let Some(task) = self.tasks.get_mut(task_id) {
if task.interval_hours.is_some() {
task.next_run = start_time + Duration::minutes(15); // Retry in 15 minutes
}
}
}
}
self.execution_history.push(execution.clone());
// Keep only recent execution history (last 1000 executions)
if self.execution_history.len() > 1000 {
self.execution_history.drain(..self.execution_history.len() - 1000);
}
Ok(execution)
}
async fn execute_daily_maintenance(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
// Run daily maintenance
persona.daily_maintenance()?;
// Check for maintenance transmissions
let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
Ok(format!("Daily maintenance completed. {} maintenance transmissions sent.", transmissions.len()))
}
async fn execute_auto_transmission(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
let transmissions = transmission_controller.check_autonomous_transmissions(persona).await?;
Ok(format!("Autonomous transmission check completed. {} transmissions sent.", transmissions.len()))
}
async fn execute_relationship_decay(&self, persona: &mut Persona) -> Result<String> {
persona.daily_maintenance()?;
Ok("Relationship time decay applied.".to_string())
}
async fn execute_breakthrough_check(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
let transmissions = transmission_controller.check_breakthrough_transmissions(persona).await?;
Ok(format!("Breakthrough check completed. {} transmissions sent.", transmissions.len()))
}
async fn execute_maintenance_transmission(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
Ok(format!("Maintenance transmission check completed. {} transmissions sent.", transmissions.len()))
}
async fn execute_custom_task(&self, _name: &str, _persona: &mut Persona, _transmission_controller: &mut TransmissionController) -> Result<String> {
// Placeholder for custom task execution
Ok("Custom task executed.".to_string())
}
pub fn create_task(&mut self, task_type: TaskType, next_run: DateTime<Utc>, interval_hours: Option<i64>) -> Result<String> {
let task_id = uuid::Uuid::new_v4().to_string();
let now = Utc::now();
let task = ScheduledTask {
id: task_id.clone(),
task_type,
next_run,
interval_hours,
enabled: true,
last_run: None,
run_count: 0,
max_runs: None,
created_at: now,
metadata: HashMap::new(),
};
self.tasks.insert(task_id.clone(), task);
self.save_scheduler_data()?;
Ok(task_id)
}
pub fn enable_task(&mut self, task_id: &str) -> Result<()> {
if let Some(task) = self.tasks.get_mut(task_id) {
task.enabled = true;
self.save_scheduler_data()?;
}
Ok(())
}
pub fn disable_task(&mut self, task_id: &str) -> Result<()> {
if let Some(task) = self.tasks.get_mut(task_id) {
task.enabled = false;
self.save_scheduler_data()?;
}
Ok(())
}
pub fn delete_task(&mut self, task_id: &str) -> Result<()> {
self.tasks.remove(task_id);
self.save_scheduler_data()?;
Ok(())
}
pub fn get_task(&self, task_id: &str) -> Option<&ScheduledTask> {
self.tasks.get(task_id)
}
pub fn get_tasks(&self) -> &HashMap<String, ScheduledTask> {
&self.tasks
}
pub fn get_due_tasks(&self) -> Vec<&ScheduledTask> {
let now = Utc::now();
self.tasks
.values()
.filter(|task| task.enabled && task.next_run <= now)
.collect()
}
pub fn get_execution_history(&self, limit: Option<usize>) -> Vec<&TaskExecution> {
let mut executions: Vec<_> = self.execution_history.iter().collect();
executions.sort_by(|a, b| b.execution_time.cmp(&a.execution_time));
match limit {
Some(limit) => executions.into_iter().take(limit).collect(),
None => executions,
}
}
pub fn get_scheduler_stats(&self) -> SchedulerStats {
let total_tasks = self.tasks.len();
let enabled_tasks = self.tasks.values().filter(|task| task.enabled).count();
let due_tasks = self.get_due_tasks().len();
let total_executions = self.execution_history.len();
let successful_executions = self.execution_history.iter()
.filter(|exec| exec.success)
.count();
let today = Utc::now().date_naive();
let today_executions = self.execution_history.iter()
.filter(|exec| exec.execution_time.date_naive() == today)
.count();
let avg_duration = if total_executions > 0 {
self.execution_history.iter()
.map(|exec| exec.duration_ms)
.sum::<u64>() as f64 / total_executions as f64
} else {
0.0
};
SchedulerStats {
total_tasks,
enabled_tasks,
due_tasks,
total_executions,
successful_executions,
today_executions,
success_rate: if total_executions > 0 {
successful_executions as f64 / total_executions as f64
} else {
0.0
},
avg_duration_ms: avg_duration,
}
}
fn create_default_tasks(&mut self) -> Result<()> {
let now = Utc::now();
// Daily maintenance task - run every day at 3 AM
let mut daily_maintenance_time = now.date_naive().and_hms_opt(3, 0, 0).unwrap().and_utc();
if daily_maintenance_time <= now {
daily_maintenance_time = daily_maintenance_time + Duration::days(1);
}
self.create_task(
TaskType::DailyMaintenance,
daily_maintenance_time,
Some(24), // 24 hours = 1 day
)?;
// Auto transmission check - every 4 hours
self.create_task(
TaskType::AutoTransmission,
now + Duration::hours(1),
Some(4),
)?;
// Breakthrough check - every 2 hours
self.create_task(
TaskType::BreakthroughCheck,
now + Duration::minutes(30),
Some(2),
)?;
// Maintenance transmission - once per day
let mut maintenance_time = now.date_naive().and_hms_opt(12, 0, 0).unwrap().and_utc();
if maintenance_time <= now {
maintenance_time = maintenance_time + Duration::days(1);
}
self.create_task(
TaskType::MaintenanceTransmission,
maintenance_time,
Some(24), // 24 hours = 1 day
)?;
Ok(())
}
fn load_scheduler_data(config: &Config) -> Result<(HashMap<String, ScheduledTask>, Vec<TaskExecution>)> {
let tasks_file = config.scheduler_tasks_file();
let history_file = config.scheduler_history_file();
let tasks = if tasks_file.exists() {
let content = std::fs::read_to_string(tasks_file)
.context("Failed to read scheduler tasks file")?;
serde_json::from_str(&content)
.context("Failed to parse scheduler tasks file")?
} else {
HashMap::new()
};
let history = if history_file.exists() {
let content = std::fs::read_to_string(history_file)
.context("Failed to read scheduler history file")?;
serde_json::from_str(&content)
.context("Failed to parse scheduler history file")?
} else {
Vec::new()
};
Ok((tasks, history))
}
fn save_scheduler_data(&self) -> Result<()> {
// Save tasks
let tasks_content = serde_json::to_string_pretty(&self.tasks)
.context("Failed to serialize scheduler tasks")?;
std::fs::write(&self.config.scheduler_tasks_file(), tasks_content)
.context("Failed to write scheduler tasks file")?;
// Save execution history
let history_content = serde_json::to_string_pretty(&self.execution_history)
.context("Failed to serialize scheduler history")?;
std::fs::write(&self.config.scheduler_history_file(), history_content)
.context("Failed to write scheduler history file")?;
Ok(())
}
}
// Type alias for compatibility with CLI interface
pub type Scheduler = AIScheduler;
impl Scheduler {
pub fn list_tasks(&self) -> Result<Vec<ScheduledTaskInfo>> {
let tasks: Vec<ScheduledTaskInfo> = self.tasks
.values()
.map(|task| ScheduledTaskInfo {
name: task.task_type.to_string(),
schedule: match task.interval_hours {
Some(hours) => format!("Every {} hours", hours),
None => "One-time".to_string(),
},
next_run: task.next_run,
enabled: task.enabled,
})
.collect();
Ok(tasks)
}
}
#[derive(Debug, Clone)]
pub struct SchedulerStats {
pub total_tasks: usize,
pub enabled_tasks: usize,
pub due_tasks: usize,
pub total_executions: usize,
pub successful_executions: usize,
pub today_executions: usize,
pub success_rate: f64,
pub avg_duration_ms: f64,
}
#[derive(Debug, Clone)]
pub struct ScheduledTaskInfo {
pub name: String,
pub schedule: String,
pub next_run: DateTime<Utc>,
pub enabled: bool,
}
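// Usage sketch (illustrative; assumes a Persona and TransmissionController
// constructed elsewhere in the crate, as in the CLI entry points):
#[allow(dead_code)]
async fn scheduler_tick(
    config: &Config,
    persona: &mut Persona,
    transmission: &mut TransmissionController,
) -> Result<()> {
    let mut scheduler = AIScheduler::new(config)?; // seeds the default tasks on first run
    let executions = scheduler.run_scheduled_tasks(persona, transmission).await?;
    for exec in &executions {
        println!("{}: success={} ({}ms)", exec.task_id, exec.success, exec.duration_ms);
    }
    Ok(())
}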

View File

@ -1,608 +0,0 @@
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::io::{self, Write};
use anyhow::{Result, Context};
use colored::*;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use rustyline::completion::{Completer, FilenameCompleter, Pair};
use rustyline::history::{History, DefaultHistory};
use rustyline::highlight::Highlighter;
use rustyline::hint::Hinter;
use rustyline::validate::Validator;
use rustyline::Helper;
use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIProvider, AIConfig};
pub async fn handle_shell(
user_id: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut shell = ShellMode::new(config, user_id)?
.with_ai_provider(provider, model);
shell.run().await
}
pub struct ShellMode {
config: Config,
persona: Persona,
ai_provider: Option<AIProviderClient>,
user_id: String,
editor: Editor<ShellCompleter, DefaultHistory>,
}
struct ShellCompleter {
completer: FilenameCompleter,
}
impl ShellCompleter {
fn new() -> Self {
ShellCompleter {
completer: FilenameCompleter::new(),
}
}
}
impl Helper for ShellCompleter {}
impl Hinter for ShellCompleter {
type Hint = String;
fn hint(&self, _line: &str, _pos: usize, _ctx: &rustyline::Context<'_>) -> Option<String> {
None
}
}
impl Highlighter for ShellCompleter {}
impl Validator for ShellCompleter {}
impl Completer for ShellCompleter {
type Candidate = Pair;
fn complete(
&self,
line: &str,
pos: usize,
ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<Pair>)> {
// Custom completion for slash commands
if line.starts_with('/') {
let commands = vec![
"/status", "/relationships", "/memories", "/analyze",
"/fortune", "/clear", "/history", "/help", "/exit"
];
let word_start = line.rfind(' ').map_or(0, |i| i + 1);
let word = &line[word_start..pos];
let matches: Vec<Pair> = commands.iter()
.filter(|cmd| cmd.starts_with(word))
.map(|cmd| Pair {
display: cmd.to_string(),
replacement: cmd.to_string(),
})
.collect();
return Ok((word_start, matches));
}
// Custom completion for shell commands starting with !
if line.starts_with('!') {
let shell_commands = vec![
"ls", "pwd", "cd", "cat", "grep", "find", "ps", "top",
"echo", "mkdir", "rmdir", "cp", "mv", "rm", "touch",
"git", "cargo", "npm", "python", "node"
];
let word_start = line.rfind(' ').map_or(1, |i| i + 1); // Skip the '!'
let word = &line[word_start..pos];
let matches: Vec<Pair> = shell_commands.iter()
.filter(|cmd| cmd.starts_with(word))
.map(|cmd| Pair {
display: cmd.to_string(),
replacement: cmd.to_string(),
})
.collect();
return Ok((word_start, matches));
}
// Fallback to filename completion
self.completer.complete(line, pos, ctx)
}
}
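// Illustrative behavior (comment added; not in the original): with the line
// "/me" and the cursor at position 3, word_start is 0 and the word is "/me",
// so complete() returns (0, ["/memories"]) and rustyline replaces the token
// with the selected candidate.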
impl ShellMode {
pub fn new(config: Config, user_id: String) -> Result<Self> {
let persona = Persona::new(&config)?;
// Setup rustyline editor with completer
let completer = ShellCompleter::new();
let mut editor = Editor::with_config(
rustyline::Config::builder()
.tab_stop(4)
.build()
)?;
editor.set_helper(Some(completer));
// Load history if exists
let history_file = config.data_dir.join("shell_history.txt");
if history_file.exists() {
let _ = editor.load_history(&history_file);
}
Ok(ShellMode {
config,
persona,
ai_provider: None,
user_id,
editor,
})
}
pub fn with_ai_provider(mut self, provider: Option<String>, model: Option<String>) -> Self {
// Use provided parameters or fall back to config defaults
let provider_name = provider
    .unwrap_or_else(|| self.config.default_provider.clone());
let model_name = model.or_else(|| {
// Try to get default model from config for the chosen provider
self.config.providers.get(&provider_name)
.map(|p| p.default_model.clone())
}).unwrap_or_else(|| {
// Final fallback based on provider
match provider_name.as_str() {
"openai" => "gpt-4o-mini".to_string(),
"ollama" => "qwen2.5-coder:latest".to_string(),
_ => "qwen2.5-coder:latest".to_string(),
}
});
let ai_provider = match provider_name.as_str() {
"ollama" => AIProvider::Ollama,
"openai" => AIProvider::OpenAI,
"claude" => AIProvider::Claude,
_ => AIProvider::Ollama, // Default fallback
};
let ai_config = AIConfig {
provider: ai_provider,
model: model_name,
api_key: None, // Will be loaded from environment if needed
base_url: None,
max_tokens: Some(2000),
temperature: Some(0.7),
};
let client = AIProviderClient::new(ai_config);
self.ai_provider = Some(client);
self
}
pub async fn run(&mut self) -> Result<()> {
println!("{}", "🚀 Starting ai.gpt Interactive Shell".cyan().bold());
// Show AI provider info
if let Some(ai_provider) = &self.ai_provider {
println!("{}: {} ({})",
"AI Provider".green().bold(),
ai_provider.get_provider().to_string(),
ai_provider.get_model());
} else {
println!("{}: {}", "AI Provider".yellow().bold(), "Simple mode (no AI)");
}
println!("{}", "Type 'help' for commands, 'exit' to quit".dimmed());
println!("{}", "Use Tab for command completion, Ctrl+C to interrupt, Ctrl+D to exit".dimmed());
loop {
// Read user input with rustyline (supports completion, history, etc.)
let readline = self.editor.readline("ai.shell> ");
match readline {
Ok(line) => {
let input = line.trim();
// Skip empty input
if input.is_empty() {
continue;
}
// Add to history
self.editor.add_history_entry(input)
.context("Failed to add to history")?;
// Handle input
if let Err(e) = self.handle_input(input).await {
println!("{}: {}", "Error".red().bold(), e);
}
}
Err(ReadlineError::Interrupted) => {
// Ctrl+C
println!("{}", "Use 'exit' or Ctrl+D to quit".yellow());
continue;
}
Err(ReadlineError::Eof) => {
// Ctrl+D
println!("\n{}", "Goodbye!".cyan());
break;
}
Err(err) => {
println!("{}: {}", "Input error".red().bold(), err);
break;
}
}
}
// Save history before exit
self.save_history()?;
Ok(())
}
async fn handle_input(&mut self, input: &str) -> Result<()> {
match input {
// Exit commands
"exit" | "quit" | "/exit" | "/quit" => {
println!("{}", "Goodbye!".cyan());
std::process::exit(0);
}
// Help command
"help" | "/help" => {
self.show_help();
}
// Shell commands (starting with !)
input if input.starts_with('!') => {
self.execute_shell_command(&input[1..]).await?;
}
// Slash commands (starting with /)
input if input.starts_with('/') => {
self.execute_slash_command(input).await?;
}
// AI conversation
_ => {
self.handle_ai_conversation(input).await?;
}
}
Ok(())
}
fn show_help(&self) {
println!("\n{}", "ai.gpt Interactive Shell Commands".cyan().bold());
println!();
println!("{}", "Navigation & Input:".yellow().bold());
println!(" {} - Tab completion for commands and files", "Tab".green());
println!(" {} - Command history (previous/next)", "↑/↓ or Ctrl+P/N".green());
println!(" {} - Interrupt current input", "Ctrl+C".green());
println!(" {} - Exit shell", "Ctrl+D".green());
println!();
println!("{}", "Basic Commands:".yellow().bold());
println!(" {} - Show this help", "help".green());
println!(" {} - Exit the shell", "exit, quit".green());
println!(" {} - Clear screen", "/clear".green());
println!(" {} - Show command history", "/history".green());
println!();
println!("{}", "Shell Commands:".yellow().bold());
println!(" {} - Execute shell command (Tab completion)", "!<command>".green());
println!(" {} - List files", "!ls".green());
println!(" {} - Show current directory", "!pwd".green());
println!(" {} - Git status", "!git status".green());
println!(" {} - Cargo build", "!cargo build".green());
println!();
println!("{}", "AI Commands:".yellow().bold());
println!(" {} - Show AI status and relationship", "/status".green());
println!(" {} - List all relationships", "/relationships".green());
println!(" {} - Show recent memories", "/memories".green());
println!(" {} - Analyze current directory", "/analyze".green());
println!(" {} - Show today's fortune", "/fortune".green());
println!();
println!("{}", "Conversation:".yellow().bold());
println!(" {} - Chat with AI using configured provider", "Any other input".green());
println!(" {} - AI responses track relationship changes", "Relationship tracking".dimmed());
println!();
}
async fn execute_shell_command(&self, command: &str) -> Result<()> {
println!("{} {}", "Executing:".blue().bold(), command.yellow());
let output = if cfg!(target_os = "windows") {
Command::new("cmd")
.args(["/C", command])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.context("Failed to execute command")?
} else {
Command::new("sh")
.args(["-c", command])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.context("Failed to execute command")?
};
// Print stdout
if !output.stdout.is_empty() {
let stdout = String::from_utf8_lossy(&output.stdout);
println!("{}", stdout);
}
// Print stderr in red
if !output.stderr.is_empty() {
let stderr = String::from_utf8_lossy(&output.stderr);
println!("{}", stderr.red());
}
// Show exit code if not successful
if !output.status.success() {
if let Some(code) = output.status.code() {
println!("{}: {}", "Exit code".red().bold(), code);
}
}
Ok(())
}
async fn execute_slash_command(&mut self, command: &str) -> Result<()> {
match command {
"/status" => {
self.show_ai_status().await?;
}
"/relationships" => {
self.show_relationships().await?;
}
"/memories" => {
self.show_memories().await?;
}
"/analyze" => {
self.analyze_directory().await?;
}
"/fortune" => {
self.show_fortune().await?;
}
"/clear" => {
// Clear screen
print!("\x1B[2J\x1B[1;1H");
io::stdout().flush()?;
}
"/history" => {
self.show_history();
}
_ => {
println!("{}: {}", "Unknown command".red().bold(), command);
println!("Type '{}' for available commands", "help".green());
}
}
Ok(())
}
async fn handle_ai_conversation(&mut self, input: &str) -> Result<()> {
let (response, relationship_delta) = if let Some(ai_provider) = &self.ai_provider {
// Use AI provider for response
self.persona.process_ai_interaction(&self.user_id, input,
Some(ai_provider.get_provider().to_string()),
Some(ai_provider.get_model().to_string())).await?
} else {
// Use simple response
self.persona.process_interaction(&self.user_id, input)?
};
// Display conversation
println!("{}: {}", "You".cyan().bold(), input);
println!("{}: {}", "AI".green().bold(), response);
// Show relationship change if significant
if relationship_delta.abs() >= 0.1 {
if relationship_delta > 0.0 {
println!("{}", format!("(+{:.2} relationship)", relationship_delta).green());
} else {
println!("{}", format!("({:.2} relationship)", relationship_delta).red());
}
}
println!(); // Add spacing
Ok(())
}
async fn show_ai_status(&self) -> Result<()> {
let state = self.persona.get_current_state()?;
println!("\n{}", "AI Status".cyan().bold());
println!("Mood: {}", state.current_mood.yellow());
println!("Fortune: {}/10", state.fortune_value.to_string().yellow());
if let Some(relationship) = self.persona.get_relationship(&self.user_id) {
println!("\n{}", "Your Relationship".cyan().bold());
println!("Status: {}", relationship.status.to_string().yellow());
println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
println!("Interactions: {}", relationship.total_interactions);
}
println!();
Ok(())
}
async fn show_relationships(&self) -> Result<()> {
let relationships = self.persona.list_all_relationships();
if relationships.is_empty() {
println!("{}", "No relationships yet".yellow());
return Ok(());
}
println!("\n{}", "All Relationships".cyan().bold());
println!();
for (user_id, rel) in relationships {
// "📡"/"🔇" assumed for the enabled/disabled markers (the original glyphs were lost in encoding)
let transmission = if rel.is_broken {
"💔"
} else if rel.transmission_enabled {
"📡"
} else {
"🔇"
};
let user_display = if user_id.chars().count() > 20 {
let truncated: String = user_id.chars().take(20).collect();
format!("{}...", truncated)
} else {
user_id
};
println!("{:<25} {:<12} {:<8} {}",
user_display.cyan(),
rel.status.to_string(),
format!("{:.2}", rel.score),
transmission);
}
println!();
Ok(())
}
async fn show_memories(&mut self) -> Result<()> {
let memories = self.persona.get_memories(&self.user_id, 10);
if memories.is_empty() {
println!("{}", "No memories yet".yellow());
return Ok(());
}
println!("\n{}", "Recent Memories".cyan().bold());
println!();
for (i, memory) in memories.iter().enumerate() {
println!("{}: {}",
format!("Memory {}", i + 1).dimmed(),
memory);
println!();
}
Ok(())
}
async fn analyze_directory(&self) -> Result<()> {
println!("{}", "Analyzing current directory...".blue().bold());
// Get current directory
let current_dir = std::env::current_dir()
.context("Failed to get current directory")?;
println!("Directory: {}", current_dir.display().to_string().yellow());
// List files and directories
let entries = std::fs::read_dir(&current_dir)
.context("Failed to read directory")?;
let mut files = Vec::new();
let mut dirs = Vec::new();
for entry in entries {
let entry = entry.context("Failed to read directory entry")?;
let path = entry.path();
let name = path.file_name()
.and_then(|n| n.to_str())
.unwrap_or("Unknown");
if path.is_dir() {
dirs.push(name.to_string());
} else {
files.push(name.to_string());
}
}
if !dirs.is_empty() {
println!("\n{}: {}", "Directories".blue().bold(), dirs.join(", "));
}
if !files.is_empty() {
println!("{}: {}", "Files".blue().bold(), files.join(", "));
}
// Check for common project files
let project_files = ["Cargo.toml", "package.json", "requirements.txt", "Makefile", "README.md"];
let found_files: Vec<_> = project_files.iter()
.filter(|&&file| files.contains(&file.to_string()))
.collect();
if !found_files.is_empty() {
println!("\n{}: {}", "Project files detected".green().bold(),
found_files.iter().map(|s| s.to_string()).collect::<Vec<_>>().join(", "));
}
println!();
Ok(())
}
async fn show_fortune(&self) -> Result<()> {
let state = self.persona.get_current_state()?;
let fortune_stars = "🌟".repeat(state.fortune_value as usize);
let empty_stars = "".repeat((10 - state.fortune_value) as usize);
println!("\n{}", "AI Fortune".yellow().bold());
println!("{}{}", fortune_stars, empty_stars);
println!("Today's Fortune: {}/10", state.fortune_value);
if state.breakthrough_triggered {
println!("{}", "⚡ BREAKTHROUGH! Special fortune activated!".yellow());
}
println!();
Ok(())
}
fn show_history(&self) {
println!("\n{}", "Command History".cyan().bold());
let history = self.editor.history();
if history.is_empty() {
println!("{}", "No commands in history".yellow());
return;
}
// Show last 20 commands
let start = if history.len() > 20 { history.len() - 20 } else { 0 };
for (i, entry) in history.iter().enumerate().skip(start) {
println!("{:2}: {}", i + 1, entry);
}
println!();
}
fn save_history(&mut self) -> Result<()> {
let history_file = self.config.data_dir.join("shell_history.txt");
self.editor.save_history(&history_file)
.context("Failed to save shell history")?;
Ok(())
}
}
// Helper methods for AIProvider: map each provider to its string identifier
impl AIProvider {
fn to_string(&self) -> String {
match self {
AIProvider::OpenAI => "openai".to_string(),
AIProvider::Ollama => "ollama".to_string(),
AIProvider::Claude => "claude".to_string(),
}
}
}

View File

@ -1,51 +0,0 @@
use std::path::PathBuf;
use anyhow::Result;
use colored::*;
use crate::config::Config;
use crate::persona::Persona;
pub async fn handle_status(user_id: Option<String>, data_dir: Option<PathBuf>) -> Result<()> {
// Load configuration
let config = Config::new(data_dir)?;
// Initialize persona
let persona = Persona::new(&config)?;
// Get current state
let state = persona.get_current_state()?;
// Display AI status
println!("{}", "ai.gpt Status".cyan().bold());
println!("Mood: {}", state.current_mood);
println!("Fortune: {}/10", state.fortune_value);
if state.breakthrough_triggered {
println!("{}", "⚡ Breakthrough triggered!".yellow());
}
// Show personality traits
println!("\n{}", "Current Personality".cyan().bold());
for (trait_name, value) in &state.base_personality {
println!("{}: {:.2}", trait_name.cyan(), value);
}
// Show specific relationship if requested
if let Some(user_id) = user_id {
if let Some(relationship) = persona.get_relationship(&user_id) {
println!("\n{}: {}", "Relationship with".cyan(), user_id);
println!("Status: {}", relationship.status);
println!("Score: {:.2}", relationship.score);
println!("Total Interactions: {}", relationship.total_interactions);
println!("Transmission Enabled: {}", relationship.transmission_enabled);
if relationship.is_broken {
println!("{}", "⚠️ This relationship is broken and cannot be repaired.".red());
}
} else {
println!("\n{}: {}", "No relationship found with".yellow(), user_id);
}
}
Ok(())
}

View File

@ -1,480 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};
use crate::config::Config;
pub async fn handle_submodules(
action: String,
module: Option<String>,
all: bool,
dry_run: bool,
auto_commit: bool,
verbose: bool,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut submodule_manager = SubmoduleManager::new(config);
match action.as_str() {
"list" => {
submodule_manager.list_submodules(verbose).await?;
}
"update" => {
submodule_manager.update_submodules(module, all, dry_run, auto_commit, verbose).await?;
}
"status" => {
submodule_manager.show_submodule_status().await?;
}
_ => {
return Err(anyhow::anyhow!("Unknown submodule action: {}", action));
}
}
Ok(())
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubmoduleInfo {
pub name: String,
pub path: String,
pub branch: String,
pub current_commit: Option<String>,
pub target_commit: Option<String>,
pub status: String,
}
impl Default for SubmoduleInfo {
fn default() -> Self {
SubmoduleInfo {
name: String::new(),
path: String::new(),
branch: "main".to_string(),
current_commit: None,
target_commit: None,
status: "unknown".to_string(),
}
}
}
#[allow(dead_code)]
pub struct SubmoduleManager {
config: Config,
ai_root: PathBuf,
submodules: HashMap<String, SubmoduleInfo>,
}
impl SubmoduleManager {
pub fn new(config: Config) -> Self {
let ai_root = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("ai")
.join("ai");
SubmoduleManager {
config,
ai_root,
submodules: HashMap::new(),
}
}
pub async fn list_submodules(&mut self, verbose: bool) -> Result<()> {
println!("{}", "📋 Submodules Status".cyan().bold());
println!();
let submodules = self.parse_gitmodules()?;
if submodules.is_empty() {
println!("{}", "No submodules found".yellow());
return Ok(());
}
// Display submodules in a table format
println!("{:<15} {:<25} {:<15} {}",
"Module".cyan().bold(),
"Path".cyan().bold(),
"Branch".cyan().bold(),
"Status".cyan().bold());
println!("{}", "-".repeat(80));
for (module_name, module_info) in &submodules {
let status_color = match module_info.status.as_str() {
"clean" => module_info.status.green(),
"modified" => module_info.status.yellow(),
"missing" => module_info.status.red(),
"conflicts" => module_info.status.red(),
_ => module_info.status.normal(),
};
println!("{:<15} {:<25} {:<15} {}",
module_name.blue(),
module_info.path,
module_info.branch.green(),
status_color);
}
println!();
if verbose {
println!("Total submodules: {}", submodules.len().to_string().cyan());
println!("Repository root: {}", self.ai_root.display().to_string().blue());
}
Ok(())
}
pub async fn update_submodules(
&mut self,
module: Option<String>,
all: bool,
dry_run: bool,
auto_commit: bool,
verbose: bool
) -> Result<()> {
if module.is_none() && !all {
return Err(anyhow::anyhow!("Either --module or --all is required"));
}
if module.is_some() && all {
return Err(anyhow::anyhow!("Cannot use both --module and --all"));
}
let submodules = self.parse_gitmodules()?;
if submodules.is_empty() {
println!("{}", "No submodules found".yellow());
return Ok(());
}
// Determine which modules to update
let modules_to_update: Vec<String> = if all {
submodules.keys().cloned().collect()
} else if let Some(module_name) = module {
if !submodules.contains_key(&module_name) {
return Err(anyhow::anyhow!(
"Submodule '{}' not found. Available modules: {}",
module_name,
submodules.keys().cloned().collect::<Vec<_>>().join(", ")
));
}
vec![module_name]
} else {
vec![]
};
if dry_run {
println!("{}", "🔍 DRY RUN MODE - No changes will be made".yellow().bold());
}
println!("{}", format!("🔄 Updating {} submodule(s)...", modules_to_update.len()).cyan().bold());
let mut updated_modules = Vec::new();
for module_name in modules_to_update {
if let Some(module_info) = submodules.get(&module_name) {
println!("\n{}", format!("📦 Processing: {}", module_name).blue().bold());
let module_path = PathBuf::from(&module_info.path);
let full_path = self.ai_root.join(&module_path);
if !full_path.exists() {
println!("{}", format!("❌ Module directory not found: {}", module_info.path).red());
continue;
}
// Get current commit
let current_commit = self.get_current_commit(&full_path)?;
if dry_run {
println!("{}", format!("🔍 Would update {} to branch {}", module_name, module_info.branch).yellow());
if let Some(ref commit) = current_commit {
println!("{}", format!("Current: {}", commit).dimmed());
}
continue;
}
// Perform update
if let Err(e) = self.update_single_module(&module_name, &module_info, &full_path).await {
println!("{}", format!("❌ Failed to update {}: {}", module_name, e).red());
continue;
}
// Get new commit
let new_commit = self.get_current_commit(&full_path)?;
if current_commit != new_commit {
println!("{}", format!("✅ Updated {} ({:?}{:?})",
module_name,
current_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown")).green());
updated_modules.push((module_name.clone(), current_commit, new_commit));
} else {
println!("{}", "✅ Already up to date".green());
}
}
}
// Summary
if !updated_modules.is_empty() {
println!("\n{}", format!("🎉 Successfully updated {} module(s)", updated_modules.len()).green().bold());
if verbose {
for (module_name, old_commit, new_commit) in &updated_modules {
println!("{}: {:?}{:?}",
module_name,
old_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown"));
}
}
if auto_commit && !dry_run {
self.auto_commit_changes(&updated_modules).await?;
} else if !dry_run {
println!("{}", "💾 Changes staged but not committed".yellow());
println!("Run with --auto-commit to commit automatically");
}
} else if !dry_run {
println!("{}", "No modules needed updating".yellow());
}
Ok(())
}
pub async fn show_submodule_status(&self) -> Result<()> {
println!("{}", "📊 Submodule Status Overview".cyan().bold());
println!();
let submodules = self.parse_gitmodules()?;
let mut total_modules = 0;
let mut clean_modules = 0;
let mut modified_modules = 0;
let mut missing_modules = 0;
for (module_name, module_info) in submodules {
let module_path = self.ai_root.join(&module_info.path);
total_modules += 1;
if module_path.exists() {
match module_info.status.as_str() {
"clean" => clean_modules += 1,
"modified" => modified_modules += 1,
_ => {}
}
} else {
missing_modules += 1;
}
println!("{}: {}",
module_name.blue(),
if module_path.exists() {
module_info.status.green()
} else {
"missing".red()
});
}
println!();
println!("Summary: {} total, {} clean, {} modified, {} missing",
total_modules.to_string().cyan(),
clean_modules.to_string().green(),
modified_modules.to_string().yellow(),
missing_modules.to_string().red());
Ok(())
}
fn parse_gitmodules(&self) -> Result<HashMap<String, SubmoduleInfo>> {
let gitmodules_path = self.ai_root.join(".gitmodules");
if !gitmodules_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(&gitmodules_path)
.with_context(|| format!("Failed to read .gitmodules file: {}", gitmodules_path.display()))?;
let mut submodules = HashMap::new();
let mut current_name: Option<String> = None;
let mut current_path: Option<String> = None;
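// Expected .gitmodules shape (only the section header and `path` are read;
// keys such as `url` and `branch` are ignored by this parser):
//   [submodule "example"]
//       path = example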
for line in content.lines() {
let line = line.trim();
if line.starts_with("[submodule \"") && line.ends_with("\"]") {
// Save previous submodule if complete
if let (Some(name), Some(path)) = (current_name.take(), current_path.take()) {
let branch = self.get_target_branch(&name);
let status = self.get_submodule_status(&name, &path)?;
submodules.insert(name.clone(), SubmoduleInfo {
name,
path,
branch,
status,
..SubmoduleInfo::default()
});
}
// Extract new submodule name
current_name = Some(line[12..line.len()-2].to_string());
} else if line.starts_with("path = ") {
current_path = Some(line[7..].to_string());
}
}
// Save last submodule
if let (Some(name), Some(path)) = (current_name, current_path) {
let branch = self.get_target_branch(&name);
let status = self.get_submodule_status(&name, &path)?;
submodules.insert(name.clone(), SubmoduleInfo {
name,
path,
branch,
status,
..SubmoduleInfo::default()
});
}
Ok(submodules)
}
fn get_target_branch(&self, module_name: &str) -> String {
// Placeholder until this is read from the ai.json configuration; every module currently tracks "main"
match module_name {
"verse" => "main".to_string(),
"card" => "main".to_string(),
"bot" => "main".to_string(),
_ => "main".to_string(),
}
}
fn get_submodule_status(&self, _module_name: &str, module_path: &str) -> Result<String> {
let full_path = self.ai_root.join(module_path);
if !full_path.exists() {
return Ok("missing".to_string());
}
// Check git status
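// `git submodule status <path>` prefixes each line with a one-character state flag:
// ' ' = in sync with the superproject, '+' = checked-out commit differs, '-' = not initialized, 'U' = merge conflicts.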
let output = std::process::Command::new("git")
.args(&["submodule", "status", module_path])
.current_dir(&self.ai_root)
.output();
match output {
Ok(output) if output.status.success() => {
let stdout = String::from_utf8_lossy(&output.stdout);
if let Some(status_char) = stdout.chars().next() {
match status_char {
' ' => Ok("clean".to_string()),
'+' => Ok("modified".to_string()),
'-' => Ok("not_initialized".to_string()),
'U' => Ok("conflicts".to_string()),
_ => Ok("unknown".to_string()),
}
} else {
Ok("unknown".to_string())
}
}
_ => Ok("unknown".to_string())
}
}
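/// Returns the first 8 characters of HEAD's commit SHA, or None if `git rev-parse` fails (e.g. not a git repository).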
fn get_current_commit(&self, module_path: &PathBuf) -> Result<Option<String>> {
let output = std::process::Command::new("git")
.args(&["rev-parse", "HEAD"])
.current_dir(module_path)
.output();
match output {
Ok(output) if output.status.success() => {
let commit = String::from_utf8_lossy(&output.stdout).trim().to_string();
if commit.len() >= 8 {
Ok(Some(commit[..8].to_string()))
} else {
Ok(Some(commit))
}
}
_ => Ok(None)
}
}
async fn update_single_module(
&self,
_module_name: &str,
module_info: &SubmoduleInfo,
module_path: &PathBuf
) -> Result<()> {
// Fetch latest changes
println!("{}", "Fetching latest changes...".dimmed());
let fetch_output = std::process::Command::new("git")
.args(&["fetch", "origin"])
.current_dir(module_path)
.output()?;
if !fetch_output.status.success() {
return Err(anyhow::anyhow!("Failed to fetch: {}",
String::from_utf8_lossy(&fetch_output.stderr)));
}
// Switch to target branch
println!("{}", format!("Switching to branch {}...", module_info.branch).dimmed());
let checkout_output = std::process::Command::new("git")
.args(&["checkout", &module_info.branch])
.current_dir(module_path)
.output()?;
if !checkout_output.status.success() {
return Err(anyhow::anyhow!("Failed to checkout {}: {}",
module_info.branch, String::from_utf8_lossy(&checkout_output.stderr)));
}
// Pull latest changes
let pull_output = std::process::Command::new("git")
.args(&["pull", "origin", &module_info.branch])
.current_dir(module_path)
.output()?;
if !pull_output.status.success() {
return Err(anyhow::anyhow!("Failed to pull: {}",
String::from_utf8_lossy(&pull_output.stderr)));
}
// Stage the submodule update
let add_output = std::process::Command::new("git")
.args(&["add", &module_info.path])
.current_dir(&self.ai_root)
.output()?;
if !add_output.status.success() {
return Err(anyhow::anyhow!("Failed to stage submodule: {}",
String::from_utf8_lossy(&add_output.stderr)));
}
Ok(())
}
async fn auto_commit_changes(&self, updated_modules: &[(String, Option<String>, Option<String>)]) -> Result<()> {
println!("{}", "💾 Auto-committing changes...".blue());
let mut commit_message = format!("Update submodules\n\n📦 Updated modules: {}\n", updated_modules.len());
for (module_name, old_commit, new_commit) in updated_modules {
commit_message.push_str(&format!(
"- {}: {}{}\n",
module_name,
old_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown")
));
}
commit_message.push_str("\n🤖 Generated with aigpt-rs submodules update");
let commit_output = std::process::Command::new("git")
.args(&["commit", "-m", &commit_message])
.current_dir(&self.ai_root)
.output()?;
if commit_output.status.success() {
println!("{}", "✅ Changes committed successfully".green());
} else {
return Err(anyhow::anyhow!("Failed to commit: {}",
String::from_utf8_lossy(&commit_output.stderr)));
}
Ok(())
}
}

File diff suppressed because it is too large

View File

@ -1,423 +0,0 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use crate::config::Config;
use crate::persona::Persona;
use crate::relationship::{Relationship, RelationshipStatus};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionLog {
pub user_id: String,
pub message: String,
pub timestamp: DateTime<Utc>,
pub transmission_type: TransmissionType,
pub success: bool,
pub error: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TransmissionType {
Autonomous, // AI decided to send
Scheduled, // Time-based trigger
Breakthrough, // Fortune breakthrough triggered
Maintenance, // Daily maintenance message
}
impl std::fmt::Display for TransmissionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TransmissionType::Autonomous => write!(f, "autonomous"),
TransmissionType::Scheduled => write!(f, "scheduled"),
TransmissionType::Breakthrough => write!(f, "breakthrough"),
TransmissionType::Maintenance => write!(f, "maintenance"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionController {
config: Config,
transmission_history: Vec<TransmissionLog>,
last_check: Option<DateTime<Utc>>,
}
impl TransmissionController {
pub fn new(config: Config) -> Result<Self> {
let transmission_history = Self::load_transmission_history(&config)?;
Ok(TransmissionController {
config,
transmission_history,
last_check: None,
})
}
pub async fn check_autonomous_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let now = Utc::now();
// Get all transmission-eligible relationships
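// Collect the ids into owned Strings first so any borrow from list_all_relationships() ends before persona is queried again in the loop below.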
let eligible_user_ids: Vec<String> = {
let relationships = persona.list_all_relationships();
relationships.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| rel.score >= rel.threshold)
.map(|(id, _)| id.clone())
.collect()
};
for user_id in eligible_user_ids {
// Get fresh relationship data for each check
if let Some(relationship) = persona.get_relationship(&user_id) {
// Check if enough time has passed since last transmission
if let Some(last_transmission) = relationship.last_transmission {
let hours_since_last = (now - last_transmission).num_hours();
if hours_since_last < 24 {
continue; // Skip if transmitted in last 24 hours
}
}
// Check if conditions are met for autonomous transmission
if self.should_transmit_to_user(&user_id, relationship, persona)? {
let transmission = self.generate_autonomous_transmission(persona, &user_id).await?;
transmissions.push(transmission);
}
}
}
self.last_check = Some(now);
self.save_transmission_history()?;
Ok(transmissions)
}
pub async fn check_breakthrough_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let state = persona.get_current_state()?;
// Only trigger breakthrough transmissions if fortune is very high
if !state.breakthrough_triggered || state.fortune_value < 9 {
return Ok(transmissions);
}
// Get close relationships for breakthrough sharing
let relationships = persona.list_all_relationships();
let close_friends: Vec<_> = relationships.iter()
.filter(|(_, rel)| matches!(rel.status, RelationshipStatus::Friend | RelationshipStatus::CloseFriend))
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.collect();
for (user_id, _relationship) in close_friends {
// Check if we haven't sent a breakthrough message today
let today = chrono::Utc::now().date_naive();
let already_sent_today = self.transmission_history.iter()
.any(|log| {
log.user_id == *user_id &&
matches!(log.transmission_type, TransmissionType::Breakthrough) &&
log.timestamp.date_naive() == today
});
if !already_sent_today {
let transmission = self.generate_breakthrough_transmission(persona, user_id).await?;
transmissions.push(transmission);
}
}
Ok(transmissions)
}
pub async fn check_maintenance_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let now = Utc::now();
// Only send maintenance messages once per day
let today = now.date_naive();
let already_sent_today = self.transmission_history.iter()
.any(|log| {
matches!(log.transmission_type, TransmissionType::Maintenance) &&
log.timestamp.date_naive() == today
});
if already_sent_today {
return Ok(transmissions);
}
// Apply daily maintenance to persona
persona.daily_maintenance()?;
// Get relationships that might need a maintenance check-in
let relationships = persona.list_all_relationships();
let maintenance_candidates: Vec<_> = relationships.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| {
// Send maintenance to relationships that haven't been contacted in a while
if let Some(last_interaction) = rel.last_interaction {
let days_since = (now - last_interaction).num_days();
days_since >= 7 // Haven't talked in a week
} else {
false
}
})
.take(3) // Limit to 3 maintenance messages per day
.collect();
for (user_id, _) in maintenance_candidates {
let transmission = self.generate_maintenance_transmission(persona, user_id).await?;
transmissions.push(transmission);
}
Ok(transmissions)
}
fn should_transmit_to_user(&self, user_id: &str, relationship: &Relationship, persona: &Persona) -> Result<bool> {
// Basic transmission criteria
if !relationship.transmission_enabled || relationship.is_broken {
return Ok(false);
}
// Score must be above threshold
if relationship.score < relationship.threshold {
return Ok(false);
}
// Check transmission cooldown
if let Some(last_transmission) = relationship.last_transmission {
let hours_since = (Utc::now() - last_transmission).num_hours();
if hours_since < 24 {
return Ok(false);
}
}
// Calculate transmission probability based on relationship strength
let base_probability = match relationship.status {
RelationshipStatus::New => 0.1,
RelationshipStatus::Acquaintance => 0.2,
RelationshipStatus::Friend => 0.4,
RelationshipStatus::CloseFriend => 0.6,
RelationshipStatus::Broken => 0.0,
};
// Modify probability based on fortune
let state = persona.get_current_state()?;
let fortune_modifier = (state.fortune_value as f64 - 5.0) / 10.0; // -0.4 to +0.5
let final_probability = (base_probability + fortune_modifier).max(0.0).min(1.0);
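// Worked example: a Friend (base 0.4) on a fortune-9 day gets 0.4 + (9 - 5) / 10 = 0.8.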
// Simple random check (in real implementation, this would be more sophisticated)
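// Hash the user id together with the current Unix timestamp and map the result into [0, 1);
// deterministic within a given second, but cheap and dependency-free.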
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
user_id.hash(&mut hasher);
Utc::now().timestamp().hash(&mut hasher);
let hash = hasher.finish();
let random_value = (hash % 100) as f64 / 100.0;
Ok(random_value < final_probability)
}
async fn generate_autonomous_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
// Get recent memories for context
let memories = persona.get_memories(user_id, 3);
let context = if !memories.is_empty() {
format!("Based on our recent conversations: {}", memories.join(", "))
} else {
"Starting a spontaneous conversation".to_string()
};
// Generate message using AI if available
let message = match self.generate_ai_message(persona, user_id, &context, TransmissionType::Autonomous).await {
Ok(msg) => msg,
Err(_) => {
// Fallback to simple messages
let fallback_messages = [
"Hey! How have you been?",
"Just thinking about our last conversation...",
"Hope you're having a good day!",
"Something interesting happened today and it reminded me of you.",
];
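// Vary the fallback with the current timestamp so repeated failures don't always pick the same line.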
let index = (now.timestamp() as usize) % fallback_messages.len();
fallback_messages[index].to_string()
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Autonomous,
success: true, // For now, assume success
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_breakthrough_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
let state = persona.get_current_state()?;
let message = match self.generate_ai_message(persona, user_id, "Breakthrough moment - feeling inspired!", TransmissionType::Breakthrough).await {
Ok(msg) => msg,
Err(_) => {
format!("Amazing day today! ⚡ Fortune is at {}/10 and I'm feeling incredibly inspired. Had to share this energy with you!", state.fortune_value)
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Breakthrough,
success: true,
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_maintenance_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
let message = match self.generate_ai_message(persona, user_id, "Maintenance check-in", TransmissionType::Maintenance).await {
Ok(msg) => msg,
Err(_) => {
"Hey! It's been a while since we last talked. Just checking in to see how you're doing!".to_string()
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Maintenance,
success: true,
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_ai_message(&self, _persona: &mut Persona, _user_id: &str, context: &str, transmission_type: TransmissionType) -> Result<String> {
// Try to use AI for message generation
let _system_prompt = format!(
"You are initiating a {} conversation. Context: {}. Keep the message casual, personal, and under 100 characters. Show genuine interest in the person.",
transmission_type, context
);
// This is a simplified version - in a real implementation, we'd use the AI provider
// For now, return an error to trigger fallback
Err(anyhow::anyhow!("AI provider not available for transmission generation"))
}
fn get_eligible_relationships(&self, persona: &Persona) -> Vec<String> {
persona.list_all_relationships().iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| rel.score >= rel.threshold)
.map(|(id, _)| id.clone())
.collect()
}
pub fn get_transmission_stats(&self) -> TransmissionStats {
let total_transmissions = self.transmission_history.len();
let successful_transmissions = self.transmission_history.iter()
.filter(|log| log.success)
.count();
let today = Utc::now().date_naive();
let today_transmissions = self.transmission_history.iter()
.filter(|log| log.timestamp.date_naive() == today)
.count();
let by_type = {
let mut counts = HashMap::new();
for log in &self.transmission_history {
*counts.entry(log.transmission_type.to_string()).or_insert(0) += 1;
}
counts
};
TransmissionStats {
total_transmissions,
successful_transmissions,
today_transmissions,
success_rate: if total_transmissions > 0 {
successful_transmissions as f64 / total_transmissions as f64
} else {
0.0
},
by_type,
}
}
pub fn get_recent_transmissions(&self, limit: usize) -> Vec<&TransmissionLog> {
let mut logs: Vec<_> = self.transmission_history.iter().collect();
logs.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
logs.into_iter().take(limit).collect()
}
fn load_transmission_history(config: &Config) -> Result<Vec<TransmissionLog>> {
let file_path = config.transmission_file();
if !file_path.exists() {
return Ok(Vec::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read transmission history file")?;
let history: Vec<TransmissionLog> = serde_json::from_str(&content)
.context("Failed to parse transmission history file")?;
Ok(history)
}
fn save_transmission_history(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.transmission_history)
.context("Failed to serialize transmission history")?;
std::fs::write(&self.config.transmission_file(), content)
.context("Failed to write transmission history file")?;
Ok(())
}
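/// Run the autonomous and breakthrough checks against a fresh Persona and return
/// (user_id, message) pairs for the transmissions that succeeded.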
pub async fn check_and_send(&mut self) -> Result<Vec<(String, String)>> {
let config = self.config.clone();
let mut persona = Persona::new(&config)?;
let mut results = Vec::new();
// Check autonomous transmissions
let autonomous = self.check_autonomous_transmissions(&mut persona).await?;
for log in autonomous {
if log.success {
results.push((log.user_id, log.message));
}
}
// Check breakthrough transmissions
let breakthrough = self.check_breakthrough_transmissions(&mut persona).await?;
for log in breakthrough {
if log.success {
results.push((log.user_id, log.message));
}
}
Ok(results)
}
}
#[derive(Debug, Clone)]
pub struct TransmissionStats {
pub total_transmissions: usize,
pub successful_transmissions: usize,
pub today_transmissions: usize,
pub success_rate: f64,
pub by_type: HashMap<String, usize>,
}