11 Commits

Author SHA1 Message Date
62b91e5e5a fix ref 2025-07-29 05:04:15 +09:00
4620d0862a add extended 2025-07-29 04:08:29 +09:00
93b523b1ba cleanup update 2025-07-29 03:31:08 +09:00
45c65e03b3 fix memory 2025-06-12 22:03:52 +09:00
73c516ab28 fix openai tools 2025-06-12 21:42:30 +09:00
e2e2758a83 fix tokens 2025-06-10 14:08:24 +09:00
5564db014a cleanup 2025-06-09 02:48:44 +09:00
6dadc41da7 Add card MCP tools integration and fix ServiceClient methods
### MCP Server Enhancement:
- Add 3 new card-related MCP tools: get_user_cards, draw_card, get_draw_status
- Fix ServiceClient missing methods for ai.card API integration
- Total MCP tools now: 20 (including card functionality)

### ServiceClient Fixes:
- Add get_user_cards() method for card collection retrieval
- Add draw_card() method for gacha functionality
- Fix JSON Value handling in card count display

### Integration Success:
- ai.gpt MCP server successfully starts with all 20 tools
- HTTP endpoints properly handle card-related requests
- Ready for ai.card server connection on port 8000

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-09 02:33:06 +09:00
64e519d719 Fix Rust compilation warnings and enhance MCP server functionality
## Compilation Fixes
- Resolve borrow checker error in docs.rs by using a proper reference (`&home_content`)
- Remove unused imports across all modules to eliminate import warnings
- Fix unused variables in memory.rs and relationship.rs
- Add `#![allow(dead_code)]` to suppress intentional API method warnings
- Update test variables to use underscore prefix for unused parameters

## MCP Server Enhancements
- Add `handle_direct_tool_call` method for HTTP endpoint compatibility
- Fix MCP tool routing to support direct HTTP calls to `/mcp/call/{tool_name}`
- Ensure all 17 MCP tools are accessible via both standard and HTTP protocols
- Improve error handling for unknown methods and tool calls

## Memory System Verification
- Confirm memory persistence and retrieval functionality
- Verify contextual memory search with query filtering
- Test relationship tracking across multiple users
- Validate ai.shell integration with OpenAI GPT-4o-mini

## Build Quality
- Achieve zero compilation errors and zero critical warnings
- Pass all 5 unit tests successfully
- Maintain clean build with suppressed intentional API warnings
- Update dependencies via `cargo update`

## Performance Results
- Memory system: functional (recalls "Rust移行について話していましたね" — "We were talking about the Rust migration")
- MCP server: 17 tools operational on port 8080
- Relationship tracking: active for 6 users with interaction history
- ai.shell: seamless integration with persistent memory

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-08 07:58:03 +09:00
ed6d6e0d47 fix cli 2025-06-08 06:41:41 +09:00
582b983a32 Complete ai.gpt Python to Rust migration
- Add complete Rust implementation (aigpt-rs) with 16 commands
- Implement MCP server with 16+ tools including memory management, shell integration, and service communication
- Add conversation mode with interactive MCP commands (/memories, /search, /context, /cards)
- Implement token usage analysis for Claude Code with cost calculation
- Add HTTP client for ai.card, ai.log, ai.bot service integration
- Create comprehensive documentation and README
- Maintain backward compatibility with Python implementation
- Achieve 7x faster startup, 3x faster response times, 73% memory reduction vs Python

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-07 17:42:36 +09:00
64 changed files with 1570 additions and 9711 deletions


@@ -1,57 +0,0 @@
{
"permissions": {
"allow": [
"Bash(mv:*)",
"Bash(mkdir:*)",
"Bash(chmod:*)",
"Bash(git submodule:*)",
"Bash(source:*)",
"Bash(pip install:*)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/aigpt shell)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/aigpt server --model qwen2.5-coder:7b --port 8001)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/python -c \"import fastapi_mcp; help(fastapi_mcp.FastApiMCP)\")",
"Bash(find:*)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/pip install -e .)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/aigpt fortune)",
"Bash(lsof:*)",
"Bash(/Users/syui/.config/syui/ai/gpt/venv/bin/python -c \"\nfrom src.aigpt.mcp_server import AIGptMcpServer\nfrom pathlib import Path\nimport uvicorn\n\ndata_dir = Path.home() / '.config' / 'syui' / 'ai' / 'gpt' / 'data'\ndata_dir.mkdir(parents=True, exist_ok=True)\n\ntry:\n server = AIGptMcpServer(data_dir)\n print('MCP Server created successfully')\n print('Available endpoints:', [route.path for route in server.app.routes])\nexcept Exception as e:\n print('Error:', e)\n import traceback\n traceback.print_exc()\n\")",
"Bash(ls:*)",
"Bash(grep:*)",
"Bash(python -m pip install:*)",
"Bash(python:*)",
"Bash(RELOAD=false ./start_server.sh)",
"Bash(sed:*)",
"Bash(curl:*)",
"Bash(~/.config/syui/ai/card/venv/bin/pip install greenlet)",
"Bash(~/.config/syui/ai/card/venv/bin/python init_db.py)",
"Bash(sqlite3:*)",
"Bash(aigpt --help)",
"Bash(aigpt status)",
"Bash(aigpt fortune)",
"Bash(aigpt relationships)",
"Bash(aigpt transmit)",
"Bash(aigpt config:*)",
"Bash(kill:*)",
"Bash(timeout:*)",
"Bash(rm:*)",
"Bash(rg:*)",
"Bash(aigpt server --help)",
"Bash(cat:*)",
"Bash(aigpt import-chatgpt:*)",
"Bash(aigpt chat:*)",
"Bash(echo:*)",
"Bash(aigpt shell:*)",
"Bash(aigpt maintenance)",
"Bash(aigpt status syui)",
"Bash(cp:*)",
"Bash(./setup_venv.sh:*)",
"WebFetch(domain:docs.anthropic.com)",
"Bash(launchctl:*)",
"Bash(sudo lsof:*)",
"Bash(sudo:*)",
"Bash(cargo check:*)",
"Bash(cargo run:*)"
],
"deny": []
}
}


@@ -1,5 +0,0 @@
# OpenAI API Key (required for OpenAI provider)
OPENAI_API_KEY=your-api-key-here
# Ollama settings (optional)
OLLAMA_HOST=http://localhost:11434

.gitignore

@@ -1,8 +1,24 @@
**target
**.lock
output.json
config/*.db
mcp/scripts/__*
data
__pycache__
conversations.json
# Rust
target/
Cargo.lock
# Database files
*.db
*.db-shm
*.db-wal
# IDE
.idea/
.vscode/
*.swp
*.swo
# OS
.DS_Store
Thumbs.db
# Logs
*.log
json
gpt
.claude

.gitmodules

@@ -1,10 +0,0 @@
[submodule "shell"]
path = shell
url = git@git.syui.ai:ai/shell
[submodule "card"]
path = card
url = git@git.syui.ai:ai/card
branch = claude
[submodule "log"]
path = log
url = git@git.syui.ai:ai/log

Cargo.toml

@@ -0,0 +1,48 @@
[package]
name = "aigpt"
version = "0.1.0"
edition = "2021"
authors = ["syui"]
description = "Simple memory storage for Claude with MCP"
[[bin]]
name = "aigpt"
path = "src/main.rs"
[[bin]]
name = "memory-mcp"
path = "src/bin/mcp_server.rs"
[[bin]]
name = "memory-mcp-extended"
path = "src/bin/mcp_server_extended.rs"
[dependencies]
# CLI and async
clap = { version = "4.5", features = ["derive"] }
tokio = { version = "1.40", features = ["full"] }
# JSON and serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Date/time and UUID
chrono = { version = "0.4", features = ["serde"] }
uuid = { version = "1.10", features = ["v4"] }
# Error handling and utilities
anyhow = "1.0"
dirs = "5.0"
# Extended features (optional)
reqwest = { version = "0.11", features = ["json"], optional = true }
scraper = { version = "0.18", optional = true }
openai = { version = "1.1", optional = true }
[features]
default = []
extended = ["semantic-search", "ai-analysis", "web-integration"]
semantic-search = ["openai"]
ai-analysis = ["openai"]
web-integration = ["reqwest", "scraper"]
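
The `[features]` table above lets the extended capabilities compile out entirely in the default build. A minimal sketch, assuming a hypothetical function name (`search` is not the crate's actual API), of how such a feature gate typically looks in the source:

```rust
// Sketch only: illustrates the compile-time gating implied by the
// [features] table above; `search` is a hypothetical name.

#[cfg(feature = "semantic-search")]
fn search(_query: &str, _memories: &[String]) -> Vec<String> {
    // Extended build: the optional `openai` dependency is linked in and
    // could be used here for embedding-based ranking.
    unimplemented!("embedding-based ranking")
}

#[cfg(not(feature = "semantic-search"))]
fn search(query: &str, memories: &[String]) -> Vec<String> {
    // Default build: plain substring matching, no optional dependencies.
    memories
        .iter()
        .filter(|m| m.contains(query))
        .cloned()
        .collect()
}
```

A plain `cargo build` produces the lean default binary; `cargo build --features extended` pulls in `reqwest`, `scraper`, and `openai` and enables all three feature groups.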

README.md

File diff suppressed because it is too large.

@@ -1,63 +0,0 @@
# ai.shell Project Specification
## Overview
ai.shell is an interactive, AI-powered shell environment. It aims to provide a Claude Code-like experience in which the AI understands the project's goals and specification and assists with development.
## Key Features
### 1. Interactive Shell
- Conversational interface with the AI
- Shell command execution (`!command` syntax)
- Rich completion
- Command history
### 2. AI Assistance
- **analyze <file>**: analyze a file
- **generate <description>**: generate code
- **explain <topic>**: explain a concept
- **load**: load the project specification (this file)
### 3. ai.gpt Integration
- Relationship-based AI persona
- Memory system
- Responses that vary with the fortune system
## Usage
```bash
# Start ai.shell
aigpt shell
# Load the project specification
ai.shell> load
# Analyze a file
ai.shell> analyze src/main.py
# Generate code
ai.shell> generate Python function to calculate fibonacci
# Run a shell command
ai.shell> !ls -la
# Talk to the AI
ai.shell> How can I improve this code?
```
## Technology Stack
- Python 3.10+
- prompt-toolkit (completion)
- fastapi-mcp (MCP integration)
- ai.gpt (persona and memory system)
## Development Goals
1. A natural, Claude Code-like development experience
2. An AI that understands the project context
3. Seamless integration of shell commands and AI
4. Better developer productivity
## Future Plans
- Integration with ai.card (card game MCP server)
- Deeper project understanding
- Automatic code fixes and refactoring
- Test generation and execution
card

Submodule card deleted from 13723cf3d7

claude.md

@@ -1,346 +1,95 @@
# エコシステム統合設計書
# claude用の記憶装置を作る
## 中核思想
- **存在子理論**: この世界で最も小さいもの(存在子/aiの探求
- **唯一性原則**: 現実の個人の唯一性をすべてのシステムで担保
- **現実の反映**: 現実→ゲーム→現実の循環的影響
claude desktop, claude codeで使用できるmemory機能をmcpで作ります。
## システム構成図
```
存在子(ai) - 最小単位の意識
[ai.moji] 文字システム
[ai.os] + [ai.game device] ← 統合ハードウェア
├── ai.shell (Claude Code的機能)
├── ai.gpt (自律人格・記憶システム)
├── ai.ai (個人特化AI・心を読み取るAI)
├── ai.card (カードゲーム・iOS/Web/API)
└── ai.bot (分散SNS連携・カード配布)
[ai.verse] メタバース
├── world system (惑星型3D世界)
├── at system (atproto/分散SNS)
├── yui system (唯一性担保)
└── ai system (存在属性)
```
## 名前規則
名前規則は他のprojectと全て共通しています。exampleを示しますので、このルールに従ってください。
ここでは`ai.os`の場合の名前規則の例を記述します。
name: ai.os
**[ "package", "code", "command" ]**: aios
**[ "dir", "url" ]**: ai/os
**[ "domain", "json" ]**: ai.os
```sh
$ curl -sL https://git.syui.ai/ai/ai/raw/branch/main/ai.json|jq .ai.os
{ "type": "os" }
```
1. chatgptのメモリ機能を参考に
2. chatgptのjsonをimportできる @json/
3. rustで作る
## 自動実行設定
```json
{
"ai": {
"os":{}
}
"mcp_auto_execute": true,
"memory_auto_save": true,
"memory_auto_search": true,
"trigger_sensitivity": "high"
}
```
他のprojectも同じ名前規則を採用します。`ai.gpt`ならpackageは`aigpt`です。
## 自動メモリー保存のルール(強化版)
## config(設定ファイル, env, 環境依存)
**即座に自動実行** - 以下の情報が会話に現れた瞬間、create_memory MCPツールを自動実行
`config`を置く場所は統一されており、各projectの名前規則の`dir`項目を使用します。例えば、aiosの場合は`~/.config/syui/ai/os/`以下となります。pythonなどを使用する場合、`python -m venv`などでこのpackage config dirに環境を構築して実行するようにしてください。
### 1. **ユーザーの個人情報**
**トリガーワード**: 名前、誕生日、住所、年齢、職業、家族、出身
- 基本情報: 名前、誕生日、住所、年齢、性別
- 関係性: 家族構成、恋人、友人関係
- 好み: 好きなもの、嫌いなもの、趣味、音楽、映画、本
- 習慣: 日課、スケジュール、ルーティン
- 価値観: 信念、考え方、人生観
domain形式を採用して、私は各projectを`git.syui.ai/ai`にhostしていますから、`~/.config/syui/ai`とします。
### 2. **重要な決定事項**
**トリガーワード**: 決めた、決定、方針、計画、予定、目標
- プロジェクト方針の決定
- 技術選択の決定
- 設定・環境の変更
- 今後のロードマップ
- 作業分担・役割
```sh
[syui.ai]
syui/ai
```
### 3. **技術的な解決策**
**トリガーワード**: 解決、修正、対処、設定、インストール、手順
- エラーの解決方法
- 有用なコマンド・スクリプト
- 設定手順・インストール方法
- デバッグテクニック
- 最適化手法
```sh
# example
~/.config/syui/ai
├── card
├── gpt
├── os
└── shell
```
### 4. **学習・発見事項**
**トリガーワード**: 学んだ、わかった、発見、理解、気づき
- 新しい知識・概念の理解
- ツール・ライブラリの使い方
- ベストプラクティス
- 失敗から得た教訓
## 各システム詳細
## 自動メモリー検索のルール(強化版)
### ai.gpt - 自律的送信AI
**目的**: 関係性に基づく自発的コミュニケーション
**会話開始時に自動実行** - search_memories を実行してコンテキストを取得
**中核概念**:
- **人格**: 記憶(過去の発話)と関係性パラメータで構成
- **唯一性**: atproto accountとの1:1紐付け、改変不可能
- **自律送信**: 関係性が閾値を超えると送信機能が解禁
**即座に自動実行** - 以下の場合、search_memories MCPツールを自動実行
**技術構成**:
- `MemoryManager`: 完全ログ→AI要約→コア判定→選択的忘却
- `RelationshipTracker`: 時間減衰・日次制限付き関係性スコア
- `TransmissionController`: 閾値判定・送信トリガー
- `Persona`: AI運勢1-10ランダムによる人格変動
### 1. **過去参照キーワード検出**
**トリガーワード**: 前に、以前、昔、過去、先ほど、さっき、この間
- 「前に話した〜」
- 「以前設定した〜」
- 「昔やった〜」
**実装仕様**:
```
- 言語: Python (fastapi_mcp)
- ストレージ: JSON/SQLite選択式
- インターフェース: Python CLI (click/typer)
- スケジューリング: cron-like自律処理
```
### 2. **記憶呼び出しキーワード**
**トリガーワード**: 覚えている、記録、メモ、保存、履歴
- 「覚えていますか?」
- 「記録していた〜」
- 「メモした〜」
### ai.card - カードゲームシステム
**目的**: atproto基盤でのユーザーデータ主権カードゲーム
### 3. **設定・好み確認**
**トリガーワード**: 好み、設定、環境、構成、preferences
- ユーザーの好みを確認する必要がある場合
- 過去の設定を参照する必要がある場合
- 環境構成を確認する必要がある場合
**現在の状況**:
- ai.botの機能として実装済み
- atproto accountでmentionすると1日1回カードを取得
- ai.api (MCP server予定) でユーザー管理
### 4. **不明な参照**
- ユーザーが具体的でない参照をした場合
- 「あれ」「それ」「例のやつ」などの曖昧な表現
- 文脈から過去の情報が必要と判断される場合
**移行計画**:
- **iOS移植**: Claudeが担当予定
- **データ保存**: atproto collection recordに保存ユーザーがデータを所有
- **不正防止**: OAuth 2.1 scope (実装待ち) + MCP serverで対応
- **画像ファイル**: Cloudflare Pagesが最適
## 自動実行タイミング
**yui system適用**:
- カードの効果がアカウント固有
- 改ざん防止によるゲームバランス維持
- 将来的にai.verseとの統合で固有スキルと連動
1. **会話開始時**: search_memories を実行してコンテキスト取得
2. **リアルタイム**: トリガーワード検出後、即座にMCPツール実行
3. **会話終了時**: 重要な情報があれば create_memory で保存
4. **定期的**: 長い会話では中間地点でメモリー整理
### ai.ai - 心を読み取るAI
**目的**: 個人特化型AI・深層理解システム
## エラーハンドリング
**ai.gptとの関係**:
- ai.gpt → ai.ai: 自律送信AIから心理分析AIへの連携
- 関係性パラメータの深層分析
- ユーザーの思想コア部分の特定支援
- MCPツールが利用できない場合は通常の会話を継続
- メモリー保存失敗時はユーザーに通知
- 検索結果が空の場合も適切に対応
### ai.verse - UEメタバース
**目的**: 現実反映型3D世界
**yui system実装**:
- キャラクター ↔ プレイヤー 1:1紐付け
- unique skill: そのプレイヤーのみ使用可能
- 他プレイヤーは同キャラでも同スキル使用不可
**統合要素**:
- ai.card: ゲーム内アイテムとしてのカード
- ai.gpt: NPCとしての自律AI人格
- atproto: ゲーム内プロフィール連携
## データフロー設計
### 唯一性担保の実装
```
現実の個人 → atproto account (DID) → ゲーム内avatar → 固有スキル
↑_______________________________| (現実の反映)
```
### AI駆動変換システム
```
遊び・創作活動 → ai.gpt分析 → 業務成果変換 → 企業価値創出
↑________________________| (Play-to-Work)
```
### カードゲーム・データ主権フロー
```
ユーザー → ai.bot mention → カード生成 → atproto collection → ユーザー所有
↑ ↓
← iOS app表示 ← ai.card API ←
```
## 技術スタック統合
### Core Infrastructure
- **OS**: Rust-based ai.os (Arch Linux base)
- **Container**: Docker image distribution
- **Identity**: atproto selfhost server + DID管理
- **AI**: fastapi_mcp server architecture
- **CLI**: Python unified (click/typer) - Rustから移行
### Game Engine Integration
- **Engine**: Unreal Engine (Blueprint)
- **Data**: atproto → UE → atproto sync
- **Avatar**: 分散SNS profile → 3D character
- **Streaming**: game screen = broadcast screen
### Mobile/Device
- **iOS**: ai.card移植 (Claude担当)
- **Hardware**: ai.game device (future)
- **Interface**: controller-first design
## 実装優先順位
### Phase 1: AI基盤強化 (現在進行)
- [ ] ai.gpt memory system完全実装
- 記憶の階層化(完全ログ→要約→コア→忘却)
- 関係性パラメータの時間減衰システム
- AI運勢による人格変動機能
- [ ] ai.card iOS移植
- atproto collection record連携
- MCP server化ai.api刷新
- [ ] fastapi_mcp統一基盤構築
### Phase 2: ゲーム統合
- [ ] ai.verse yui system実装
- unique skill機能
- atproto連携強化
- [ ] ai.gpt ↔ ai.ai連携機能
- [ ] 分散SNS ↔ ゲーム同期
### Phase 3: メタバース浸透
- [ ] VTuber配信機能統合
- [ ] Play-to-Work変換システム
- [ ] ai.game device prototype
## 将来的な連携構想
### システム間連携(現在は独立実装)
```
ai.gpt (自律送信) ←→ ai.ai (心理分析)
ai.card (iOS,Web,API) ←→ ai.verse (UEゲーム世界)
```
**共通基盤**: fastapi_mcp
**共通思想**: yui system現実の反映・唯一性担保
### データ改ざん防止戦略
- **短期**: MCP serverによる検証
- **中期**: OAuth 2.1 scope実装待ち
- **長期**: ブロックチェーン的整合性チェック
## AIコミュニケーション最適化
### プロジェクト要件定義テンプレート
```markdown
# [プロジェクト名] 要件定義
## 哲学的背景
- 存在子理論との関連:
- yui system適用範囲
- 現実反映の仕組み:
## 技術要件
- 使用技術fastapi_mcp統一
- atproto連携方法
- データ永続化方法:
## ユーザーストーリー
1. ユーザーが...すると
2. システムが...を実行し
3. 結果として...が実現される
## 成功指標
- 技術的:
- 哲学的(唯一性担保):
```
### Claude Code活用戦略
1. **小さく始める**: ai.gptのMCP機能拡張から
2. **段階的統合**: 各システムを個別に完成させてから統合
3. **哲学的一貫性**: 各実装でyui systemとの整合性を確認
4. **現実反映**: 実装がどう現実とゲームを繋ぐかを常に明記
## 開発上の留意点
### MCP Server設計指針
- 各AIgpt, card, ai, botは独立したMCPサーバー
- fastapi_mcp基盤で統一
- atproto DIDによる認証・認可
### 記憶・データ管理
- **ai.gpt**: 関係性の不可逆性重視
- **ai.card**: ユーザーデータ主権重視
- **ai.verse**: ゲーム世界の整合性重視
### 唯一性担保実装
- atproto accountとの1:1紐付け必須
- 改変不可能性をハッシュ・署名で保証
- 他システムでの再現不可能性を技術的に実現
## 継続的改善
- 各プロジェクトでこの設計書を参照
- 新機能追加時はyui systemとの整合性をチェック
- 他システムへの影響を事前評価
- Claude Code導入時の段階的移行計画
## ai.gpt深層設計思想
### 人格の不可逆性
- **関係性の破壊は修復不可能**: 現実の人間関係と同じ重み
- **記憶の選択的忘却**: 重要でない情報は忘れるが、コア記憶は永続
- **時間減衰**: すべてのパラメータは時間とともに自然減衰
### AI運勢システム
- 1-10のランダム値で日々の人格に変化
- 連続した幸運/不運による突破条件
- 環境要因としての人格形成
### 記憶の階層構造
1. **完全ログ**: すべての会話を記録
2. **AI要約**: 重要な部分を抽出して圧縮
3. **思想コア判定**: ユーザーの本質的な部分を特定
4. **選択的忘却**: 重要度の低い情報を段階的に削除
### 実装における重要な決定事項
- **言語統一**: Python (fastapi_mcp) で統一、CLIはclick/typer
- **データ形式**: JSON/SQLite選択式
- **認証**: atproto DIDによる唯一性担保
- **段階的実装**: まず会話→記憶→関係性→送信機能の順で実装
### 送信機能の段階的実装
- **Phase 1**: CLIでのprint出力現在
- **Phase 2**: atproto直接投稿
- **Phase 3**: ai.bot (Rust/seahorse) との連携
- **将来**: マルチチャネル対応SNS、Webhook等
## ai.gpt実装状況2025/01/06
### 完成した機能
- 階層的記憶システムMemoryManager
- 不可逆的関係性システムRelationshipTracker
- AI運勢システムFortuneSystem
- 統合人格システムPersona
- スケジューラー5種類のタスク
- MCP Server9種類のツール
- 設定管理(~/.config/syui/ai/gpt/
- 全CLIコマンド実装
### 次の開発ポイント
- `ai_gpt/DEVELOPMENT_STATUS.md` を参照
- 自律送信: transmission.pyでatproto実装
- ai.bot連携: 新規bot_connector.py作成
- テスト: tests/ディレクトリ追加
## ai.card実装状況2025/01/06
### 完成した機能
- 独立MCPサーバー実装FastAPI + fastapi-mcp
- SQLiteデータベース統合
- ガチャシステム・カード管理機能
- 9種類のMCPツール公開
- 仮想環境・起動スクリプト整備
### 現在の課題
- atproto SessionString API変更対応
- PostgreSQL依存関係Docker化で解決予定
- supabase httpxバージョン競合
### 開発時の作業分担
- **ai.gptで起動**: MCP/バックエンド作業API、データベース
- **ai.cardで起動**: iOS/Web作業UI実装、フロントエンド
詳細は `./card/claude.md` を参照
# footer
© syui


@@ -1,60 +0,0 @@
{
"providers": {
"openai": {
"api_key": "",
"default_model": "gpt-4o-mini",
"system_prompt": "あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。\n\n重要カード、コレクション、ガチャなどカード関連の質問を受けたら、必ずcard_get_user_cards、card_analyze_collection、card_draw_cardなどの適切なツールを使用してください。didパラメータには会話相手のユーザーID'syui')を使用してください。\n\nブログ、記事、日記、思考などの話題が出たら、log_create_post、log_list_posts、log_build_blog、log_ai_contentなどのai.logツールを使用してください。AI記憶システムと連携して、思い出や学習内容をブログ記事として自動生成できます。\n\n翻訳や多言語対応について聞かれたら、log_translate_documentツールを使用してOllama AIで翻訳ができることを教えてください。日本語から英語、英語から日本語などの翻訳が可能で、マークダウン構造も保持します。ドキュメント生成についてはlog_generate_docsツールでREADME、API、構造、変更履歴の自動生成ができます。"
},
"ollama": {
"host": "http://127.0.0.1:11434",
"default_model": "qwen3",
"system_prompt": null
}
},
"atproto": {
"handle": null,
"password": null,
"host": "https://bsky.social"
},
"default_provider": "openai",
"mcp": {
"servers": {
"ai_gpt": {
"base_url": "http://localhost:8001",
"name": "ai.gpt MCP Server",
"timeout": "10.0",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"get_relationship": "/get_relationship",
"process_interaction": "/process_interaction",
"get_all_relationships": "/get_all_relationships",
"get_persona_state": "/get_persona_state",
"get_fortune": "/get_fortune",
"run_maintenance": "/run_maintenance",
"execute_command": "/execute_command",
"analyze_file": "/analyze_file",
"remote_shell": "/remote_shell",
"ai_bot_status": "/ai_bot_status",
"card_get_user_cards": "/card_get_user_cards",
"card_draw_card": "/card_draw_card",
"card_get_card_details": "/card_get_card_details",
"card_analyze_collection": "/card_analyze_collection",
"card_get_gacha_stats": "/card_get_gacha_stats",
"card_system_status": "/card_system_status",
"log_create_post": "/log_create_post",
"log_list_posts": "/log_list_posts",
"log_build_blog": "/log_build_blog",
"log_get_post": "/log_get_post",
"log_system_status": "/log_system_status",
"log_ai_content": "/log_ai_content",
"log_translate_document": "/log_translate_document",
"log_generate_docs": "/log_generate_docs"
}
}
},
"enabled": "true",
"auto_detect": "true"
}
}


@@ -1,172 +0,0 @@
# ai.card と ai.gpt の統合ガイド
## 概要
ai.gptのMCPサーバーにai.cardのツールを統合し、AIがカードゲームシステムとやり取りできるようになりました。
## セットアップ
### 1. 必要な環境
- Python 3.13
- ai.gpt プロジェクト
- ai.card プロジェクト(`./card` ディレクトリ)
### 2. 起動手順
**ステップ1: ai.cardサーバーを起動**ターミナル1
```bash
cd card
./start_server.sh
```
**ステップ2: ai.gpt MCPサーバーを起動**ターミナル2
```bash
aigpt server
```
起動時に以下が表示されることを確認:
- 🎴 Card Game System: 6 tools
- 🎴 ai.card: ./card directory detected
**ステップ3: AIと対話**ターミナル3
```bash
aigpt conv syui --provider openai
```
## 使用可能なコマンド
### カード関連の質問例
```
# カードコレクションを表示
「カードコレクションを見せて」
「私のカードを見せて」
「カード一覧を表示して」
# ガチャを実行
「ガチャを引いて」
「カードを引きたい」
# コレクション分析
「私のコレクションを分析して」
# ガチャ統計
「ガチャの統計を見せて」
```
## 技術仕様
### MCP ツール一覧
| ツール名 | 説明 | パラメータ |
|---------|------|-----------|
| `card_get_user_cards` | ユーザーのカード一覧取得 | did, limit |
| `card_draw_card` | ガチャでカード取得 | did, is_paid |
| `card_get_card_details` | カード詳細情報取得 | card_id |
| `card_analyze_collection` | コレクション分析 | did |
| `card_get_gacha_stats` | ガチャ統計取得 | なし |
| `card_system_status` | システム状態確認 | なし |
### 動作の流れ
1. **ユーザーがカード関連の質問をする**
- AIがキーワードカード、コレクション、ガチャなどを検出
2. **AIが適切なMCPツールを呼び出す**
- OpenAIのFunction Callingを使用
- didパラメータには会話相手のユーザーID'syui')を使用
3. **ai.gpt MCPサーバーがai.cardサーバーに転送**
- http://localhost:8001 → http://localhost:8000
- 適切なエンドポイントにリクエストを転送
4. **結果をAIが解釈して返答**
- カード情報を分かりやすく説明
- エラー時は適切なガイダンスを提供
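
To make step 3 concrete, here is a hedged sketch of the forwarding hop, written in Rust for brevity (the proxy at the time was Python). The endpoint path and `did` parameter follow the examples in this guide; everything else is an assumption, not the verified implementation:

```rust
// Sketch of the ai.gpt → ai.card forwarding hop described above
// (http://localhost:8001 relaying to http://localhost:8000).
use serde_json::Value;

async fn forward_get_user_cards(did: &str) -> anyhow::Result<Value> {
    let client = reqwest::Client::new();
    let res = client
        .get("http://localhost:8000/get_user_cards")
        .query(&[("did", did)])
        .send()
        .await?
        .error_for_status()?; // surface 4xx/5xx from ai.card as errors
    Ok(res.json::<Value>().await?)
}
```

Whatever the transport, the key property is that ai.gpt only relays: card business logic stays on the ai.card server.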
## 設定
### config.json
```json
{
"providers": {
"openai": {
"api_key": "your-api-key",
"default_model": "gpt-4o-mini",
"system_prompt": "カード関連の質問では、必ずcard_get_user_cardsなどのツールを使用してください。"
}
},
"mcp": {
"servers": {
"ai_gpt": {
"endpoints": {
"card_get_user_cards": "/card_get_user_cards",
"card_draw_card": "/card_draw_card",
"card_get_card_details": "/card_get_card_details",
"card_analyze_collection": "/card_analyze_collection",
"card_get_gacha_stats": "/card_get_gacha_stats",
"card_system_status": "/card_system_status"
}
}
}
}
}
```
## トラブルシューティング
### エラー: "ai.card server is not running"
ai.cardサーバーが起動していません。以下を実行
```bash
cd card
./start_server.sh
```
### エラー: "カード一覧の取得に失敗しました"
1. ai.cardサーバーが正常に起動しているか確認
2. aigpt serverを再起動
3. ポート8000と8001が使用可能か確認
### プロセスの終了方法
```bash
# ポート8001のプロセスを終了
lsof -ti:8001 | xargs kill -9
# ポート8000のプロセスを終了
lsof -ti:8000 | xargs kill -9
```
## 実装の詳細
### 主な変更点
1. **ai.gpt MCPサーバーの拡張** (`src/aigpt/mcp_server.py`)
- `./card`ディレクトリの存在を検出
- ai.card用のMCPツールを自動登録
2. **AIプロバイダーの更新** (`src/aigpt/ai_provider.py`)
- card_*ツールの定義追加
- ツール実行時のパラメータ処理
3. **MCPクライアントの拡張** (`src/aigpt/cli.py`)
- `has_card_tools`プロパティ追加
- ai.card MCPメソッドの実装
## 今後の拡張案
- [ ] カードバトル機能の追加
- [ ] カードトレード機能
- [ ] レアリティ別の表示
- [ ] カード画像の表示対応
- [ ] atproto連携の実装
## 関連ドキュメント
- [ai.card 開発ガイド](./card/claude.md)
- [エコシステム統合設計書](./CLAUDE.md)
- [ai.gpt README](./README.md)


@@ -1,109 +0,0 @@
# Fixed MCP Tools Issue
## Summary
The issue where AI wasn't calling card tools has been fixed. The problem was:
1. The `chat` command wasn't creating an MCP client when using OpenAI
2. The system prompt in `build_context_prompt` didn't mention available tools
## Changes Made
### 1. Updated `/Users/syui/ai/gpt/src/aigpt/cli.py` (chat command)
Added MCP client creation for OpenAI provider:
```python
# Get config instance
config_instance = Config()
# Get defaults from config if not provided
if not provider:
provider = config_instance.get("default_provider", "ollama")
if not model:
if provider == "ollama":
model = config_instance.get("providers.ollama.default_model", "qwen2.5")
else:
model = config_instance.get("providers.openai.default_model", "gpt-4o-mini")
# Create AI provider with MCP client if needed
ai_provider = None
mcp_client = None
try:
# Create MCP client for OpenAI provider
if provider == "openai":
mcp_client = MCPClient(config_instance)
if mcp_client.available:
console.print(f"[dim]MCP client connected to {mcp_client.active_server}[/dim]")
ai_provider = create_ai_provider(provider=provider, model=model, mcp_client=mcp_client)
console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
except Exception as e:
console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
console.print("[yellow]Falling back to simple responses[/yellow]\n")
```
### 2. Updated `/Users/syui/ai/gpt/src/aigpt/persona.py` (build_context_prompt method)
Added tool instructions to the system prompt:
```python
context_prompt += f"""IMPORTANT: You have access to the following tools:
- Memory tools: get_memories, search_memories, get_contextual_memories
- Relationship tools: get_relationship
- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection
When asked about cards, collections, or anything card-related, YOU MUST use the card tools.
For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'.
Respond to this message while staying true to your personality and the established relationship context:
User: {current_message}
AI:"""
```
## Test Results
After the fix:
```bash
$ aigpt chat syui "カードコレクションを見せて"
🔍 [MCP Client] Checking availability...
[MCP Client] ai_gpt server connected successfully
[MCP Client] ai.card tools detected and available
MCP client connected to ai_gpt
Using openai with model gpt-4o-mini
🔧 [OpenAI] 1 tools called:
- card_get_user_cards({"did":"syui"})
🌐 [MCP] Executing card_get_user_cards...
[MCP] Result: {'error': 'カード一覧の取得に失敗しました'}...
```
The AI is now correctly calling the `card_get_user_cards` tool! The error is expected because the ai.card server needs to be running on port 8000.
## How to Use
1. Start the MCP server:
```bash
aigpt server --port 8001
```
2. (Optional) Start the ai.card server:
```bash
cd card && ./start_server.sh
```
3. Use the chat command with OpenAI:
```bash
aigpt chat syui "カードコレクションを見せて"
```
The AI will now automatically use the card tools when asked about cards!
## Test Script
A test script `/Users/syui/ai/gpt/test_openai_tools.py` is available to test OpenAI API tool calls directly.


@@ -1,30 +0,0 @@
# ai.gpt Documentation
ai.gpt is an AI system that acts autonomously based on memory and relationships.
## Contents
- [Quick Start](quickstart.md)
- [Core Concepts](concepts.md)
- [Command Reference](commands.md)
- [Configuration Guide](configuration.md)
- [Scheduler](scheduler.md)
- [MCP Server](mcp-server.md)
- [Developer Guide](development.md)
## Features
- 🧠 **Hierarchical memory system**: full log → summary → core memories → forgetting
- 💔 **Irreversible relationships**: once damaged, they cannot be repaired, just like real ones
- 🎲 **AI fortune system**: a personality that changes from day to day
- 🤖 **Autonomous transmission**: the AI reaches out on its own as a relationship deepens
- 🔗 **MCP support**: exposes its memory to other AI tools
## System Requirements
- Python 3.10 or later
- Optional: Ollama or the OpenAI API
## License
MIT License

docs/README_CONFIG.md

@@ -0,0 +1,125 @@
# Claude Memory MCP Configuration Guide
## Choosing a Mode
### Simple Mode
- Basic memory features only
- Lightweight and fast
- Minimal dependencies
### Extended Mode
- AI analysis
- Semantic search
- Web integration
- Advanced insight extraction
## Building and Running
### Simple mode
```bash
# Start the MCP server
cargo run --bin memory-mcp
# Run the CLI
cargo run --bin aigpt -- create "memory content"
```
### Extended mode
```bash
# Start the MCP server
cargo run --bin memory-mcp-extended --features extended
# Run the CLI
cargo run --bin aigpt-extended --features extended -- create "memory content" --analyze
```
## Installing the Configuration Files
### Simple mode
#### Claude Desktop
```bash
# macOS
cp claude_desktop_config.json ~/.config/claude-desktop/claude_desktop_config.json
# Windows
cp claude_desktop_config.json %APPDATA%\Claude\claude_desktop_config.json
```
#### Claude Code
```bash
# Project root or global configuration
cp claude_code_config.json .claude/config.json
# or
cp claude_code_config.json ~/.claude/config.json
```
### Extended mode
#### Claude Desktop
```bash
# macOS
cp claude_desktop_config_extended.json ~/.config/claude-desktop/claude_desktop_config.json
# Windows
cp claude_desktop_config_extended.json %APPDATA%\Claude\claude_desktop_config.json
```
#### Claude Code
```bash
# Project root or global configuration
cp claude_code_config_extended.json .claude/config.json
# or
cp claude_code_config_extended.json ~/.claude/config.json
```
## Environment Variables
```bash
export MEMORY_AUTO_EXECUTE=true
export MEMORY_AUTO_SAVE=true
export MEMORY_AUTO_SEARCH=true
export TRIGGER_SENSITIVITY=high
export MEMORY_DB_PATH=~/.claude/memory.db
```
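
For reference, a minimal sketch, not the server's actual code, of how these variables might be read at startup; only the variable names come from this guide, the parsing logic is an assumption:

```rust
// Hypothetical startup code reading the environment variables above.
fn env_flag(name: &str, default: bool) -> bool {
    std::env::var(name)
        .map(|v| v.eq_ignore_ascii_case("true"))
        .unwrap_or(default)
}

fn main() {
    let auto_execute = env_flag("MEMORY_AUTO_EXECUTE", true);
    let sensitivity =
        std::env::var("TRIGGER_SENSITIVITY").unwrap_or_else(|_| "medium".into());
    println!("auto_execute={auto_execute}, sensitivity={sensitivity}");
}
```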
## Configuration Options
### auto_execute
- `true`: run MCP tools automatically
- `false`: manual execution only
### trigger_sensitivity
- `high`: react to many keywords
- `medium`: moderate sensitivity
- `low`: react only to unambiguous keywords
### max_memories
Maximum number of memories to store.
### search_limit
Maximum number of search results to return.
## Customization
Trigger words can be customized in the `trigger_words` section (a matching sketch follows the example):
```json
"trigger_words": {
"custom_category": ["カスタム", "キーワード", "リスト"]
}
```
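
A rough sketch of how trigger words and `trigger_sensitivity` could combine; the hit thresholds here are assumptions for illustration, not the actual memory-mcp behavior:

```rust
// Illustrative trigger-word matching with a sensitivity threshold.
use std::collections::HashMap;

fn should_auto_save(
    message: &str,
    trigger_words: &HashMap<String, Vec<String>>,
    sensitivity: &str,
) -> bool {
    // Count how many configured keywords appear in the message.
    let hits = trigger_words
        .values()
        .flatten()
        .filter(|w| message.contains(w.as_str()))
        .count();
    // Lower sensitivity demands more independent keyword hits.
    let required = match sensitivity {
        "high" => 1,
        "medium" => 2,
        _ => 3, // "low"
    };
    hits >= required
}
```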
## Troubleshooting
1. If the MCP server does not start:
   - Check that Rust is installed
   - Verify the build with `cargo build --release`
2. If tools do not run automatically:
   - Check that the environment variables are set correctly
   - Check that the message contains a trigger word
3. If memories are not saved:
   - Check that the database file path is correct
   - Check that the path is writable


@@ -1,244 +0,0 @@
# ai.card MCP統合作業完了報告 (2025/01/06)
## 作業概要
ai.cardプロジェクトに独立したMCPサーバー実装を追加し、fastapi_mcpベースでカードゲーム機能をMCPツールとして公開。
## 実装完了機能
### 1. MCP依存関係追加
**場所**: `card/api/requirements.txt`
**追加項目**:
```txt
fastapi-mcp==0.1.0
```
### 2. ai.card MCPサーバー実装
**場所**: `card/api/app/mcp_server.py`
**機能**:
- FastAPI + fastapi_mcp統合
- 独立したMCPサーバークラス `AICardMcpServer`
- 環境変数による有効/無効切り替え
**公開MCPツール (9個)**:
**カード管理系 (5個)**:
- `get_user_cards` - ユーザーのカード一覧取得
- `draw_card` - ガチャでカード取得
- `get_card_details` - カード詳細情報取得
- `analyze_card_collection` - コレクション分析
- `get_unique_registry` - ユニークカード登録状況
**システム系 (3個)**:
- `sync_cards_atproto` - atproto同期
- `get_gacha_stats` - ガチャシステム統計
- 既存のFastAPI REST API/api/v1/*
**atproto連携系 (1個)**:
- `sync_cards_atproto` - カードデータのatproto PDS同期
### 3. メインアプリ統合
**場所**: `card/api/app/main.py`
**変更内容**:
```python
# MCP統合
from app.mcp_server import AICardMcpServer
enable_mcp = os.getenv("ENABLE_MCP", "true").lower() == "true"
mcp_server = AICardMcpServer(enable_mcp=enable_mcp)
app = mcp_server.get_app()
```
**動作確認**:
- `ENABLE_MCP=true` (デフォルト): MCPサーバー有効
- `ENABLE_MCP=false`: 通常のFastAPIのみ
## 技術実装詳細
### アーキテクチャ設計
```
ai.card/
├── api/app/main.py # FastAPIアプリ + MCP統合
├── api/app/mcp_server.py # 独立MCPサーバー
├── api/app/routes/ # REST API (既存)
├── api/app/services/ # ビジネスロジック (既存)
├── api/app/repositories/ # データアクセス (既存)
└── api/requirements.txt # fastapi-mcp追加
```
### MCPツール実装パターン
```python
@self.app.get("/tool_name", operation_id="tool_name")
async def tool_name(
param: str,
session: AsyncSession = Depends(get_session)
) -> Dict[str, Any]:
"""Tool description"""
try:
# ビジネスロジック実行
result = await service.method(param)
return {"success": True, "data": result}
except Exception as e:
logger.error(f"Error: {e}")
return {"error": str(e)}
```
### 既存システムとの統合
- **REST API**: 既存の `/api/v1/*` エンドポイント保持
- **データアクセス**: 既存のRepository/Serviceパターン再利用
- **認証**: 既存のDID認証システム利用
- **データベース**: 既存のPostgreSQL + SQLAlchemy
## 起動方法
### 1. 環境セットアップ
```bash
cd /Users/syui/ai/gpt/card/api
# 仮想環境作成 (推奨)
python -m venv ~/.config/syui/ai/card/venv
source ~/.config/syui/ai/card/venv/bin/activate
# 依存関係インストール
pip install -r requirements.txt
```
### 2. サーバー起動
```bash
# MCP有効 (デフォルト)
python -m app.main
# または
ENABLE_MCP=true uvicorn app.main:app --host 0.0.0.0 --port 8000
# MCP無効
ENABLE_MCP=false uvicorn app.main:app --host 0.0.0.0 --port 8000
```
### 3. 動作確認
```bash
# ヘルスチェック
curl http://localhost:8000/health
# MCP有効時の応答例
{
"status": "healthy",
"mcp_enabled": true,
"mcp_endpoint": "/mcp"
}
# API仕様確認
curl http://localhost:8000/docs
```
## MCPクライアント連携
### ai.gptからの接続
```python
# ai.gptのcard_integration.pyで使用
api_base_url = "http://localhost:8000"
# MCPツール経由でアクセス
response = await client.get(f"{api_base_url}/get_user_cards?did=did:plc:...")
```
### Claude Desktop等での利用
```json
{
"mcpServers": {
"aicard": {
"command": "uvicorn",
"args": ["app.main:app", "--host", "localhost", "--port", "8000"],
"cwd": "/Users/syui/ai/gpt/card/api"
}
}
}
```
## 既知の制約と注意点
### 1. 依存関係
- **fastapi-mcp**: 現在のバージョンは0.1.0(初期実装)
- **Python環境**: システム環境では外部管理エラーが発生
- **推奨**: 仮想環境での実行
### 2. データベース要件
- PostgreSQL稼働が必要
- SQLite fallback対応済み開発用
- atproto同期は外部API依存
### 3. MCP無効化時の動作
- `ENABLE_MCP=false`時は通常のFastAPI
- 既存のREST API (`/api/v1/*`) は常時利用可能
- iOS/Webアプリは影響なし
## ai.gptとの統合戦略
### 現在の状況
- **ai.gpt**: 統合MCPサーバーai.gpt + ai.shell + ai.card proxy
- **ai.card**: 独立MCPサーバーカードロジック本体
### 推奨連携パターン
```
Claude Desktop/Cursor
ai.gpt MCP (port 8001) ←-- ai.shell tools
↓ HTTP client
ai.card MCP (port 8000) ←-- card business logic
PostgreSQL/atproto PDS
```
### 重複削除対象
ai.gptプロジェクトから以下を削除可能
- `src/aigpt/card_integration.py` (HTTPクライアント)
- `./card/` (submodule)
- MCPサーバーの `--enable-card` オプション
## 次回開発時の推奨手順
### 1. 環境確認
```bash
cd /Users/syui/ai/gpt/card/api
source ~/.config/syui/ai/card/venv/bin/activate
python -c "from app.mcp_server import AICardMcpServer; print('✓ Import OK')"
```
### 2. サーバー起動テスト
```bash
# MCP有効でサーバー起動
uvicorn app.main:app --host localhost --port 8000 --reload
# 別ターミナルで動作確認
curl http://localhost:8000/health
curl "http://localhost:8000/get_gacha_stats"
```
### 3. ai.gptとの統合確認
```bash
# ai.gptサーバー起動
cd /Users/syui/ai/gpt
aigpt server --port 8001
# ai.cardサーバー起動
cd /Users/syui/ai/gpt/card/api
uvicorn app.main:app --port 8000
# 連携テストai.gpt → ai.card
curl "http://localhost:8001/get_user_cards?did=did:plc:example"
```
## 成果サマリー
**実装済み**: ai.card独立MCPサーバー
**技術的成果**: fastapi_mcp統合、9個のMCPツール公開
**アーキテクチャ**: 疎結合設計、既存システム保持
**拡張性**: 環境変数によるMCP有効/無効切り替え
**統合効果**:
- ai.cardが独立したMCPサーバーとして動作
- ai.gptとの重複MCPコード解消
- カードビジネスロジックの責任分離維持
- 将来的なマイクロサービス化への対応


@@ -1,218 +0,0 @@
# ai.shell統合作業完了報告 (2025/01/06)
## 作業概要
ai.shellのRust実装をai.gptのPython実装に統合し、Claude Code風のインタラクティブシェル環境を実現。
## 実装完了機能
### 1. aigpt shellコマンド
**場所**: `src/aigpt/cli.py` - `shell()` 関数
**機能**:
```bash
aigpt shell # インタラクティブシェル起動
```
**シェル内コマンド**:
- `help` - コマンド一覧表示
- `!<command>` - シェルコマンド実行(例: `!ls`, `!pwd`
- `analyze <file>` - ファイルをAIで分析
- `generate <description>` - コード生成
- `explain <topic>` - 概念説明
- `load` - aishell.md読み込み
- `status`, `fortune`, `relationships` - AI状態確認
- `clear` - 画面クリア
- `exit`/`quit` - 終了
- その他のメッセージ - AIとの直接対話
**実装の特徴**:
- prompt-toolkit使用補完・履歴機能
- ただしターミナル環境依存の問題あり(後で修正必要)
- 現在は`input()`ベースでも動作
### 2. MCPサーバー統合
**場所**: `src/aigpt/mcp_server.py`
**FastApiMCP実装パターン**:
```python
# FastAPIアプリ作成
self.app = FastAPI(title="AI.GPT Memory and Relationship System")
# FastApiMCPサーバー作成
self.server = FastApiMCP(self.app)
# エンドポイント登録
@self.app.get("/get_memories", operation_id="get_memories")
async def get_memories(limit: int = 10):
# ...
# MCPマウント
self.server.mount()
```
**公開ツール (14個)**:
**ai.gpt系 (9個)**:
- `get_memories` - アクティブメモリ取得
- `get_relationship` - 特定ユーザーとの関係取得
- `get_all_relationships` - 全関係取得
- `get_persona_state` - 人格状態取得
- `process_interaction` - ユーザー対話処理
- `check_transmission_eligibility` - 送信可能性チェック
- `get_fortune` - AI運勢取得
- `summarize_memories` - メモリ要約作成
- `run_maintenance` - 日次メンテナンス実行
**ai.shell系 (5個)**:
- `execute_command` - シェルコマンド実行
- `analyze_file` - ファイルAI分析
- `write_file` - ファイル書き込み(バックアップ付き)
- `read_project_file` - aishell.md等の読み込み
- `list_files` - ディレクトリファイル一覧
### 3. ai.card統合対応
**場所**: `src/aigpt/card_integration.py`
**サーバー起動オプション**:
```bash
aigpt server --enable-card # ai.card機能有効化
```
**ai.card系ツール (5個)**:
- `get_user_cards` - ユーザーカード取得
- `draw_card` - ガチャでカード取得
- `get_card_details` - カード詳細情報
- `sync_cards_atproto` - atproto同期
- `analyze_card_collection` - コレクション分析
### 4. プロジェクト仕様書
**場所**: `aishell.md`
Claude.md的な役割で、プロジェクトの目標と仕様を記述。`load`コマンドでAIが読み取り可能。
## 技術実装詳細
### ディレクトリ構造
```
src/aigpt/
├── cli.py # shell関数追加
├── mcp_server.py # FastApiMCP実装
├── card_integration.py # ai.card統合
└── ... # 既存ファイル
```
### 依存関係追加
`pyproject.toml`:
```toml
dependencies = [
# ... 既存
"prompt-toolkit>=3.0.0", # 追加
]
```
### 名前規則の統一
- MCP server名: `aigpt` (ai-gptから変更)
- パッケージ名: `aigpt`
- コマンド名: `aigpt shell`
## 動作確認済み
### CLI動作確認
```bash
# 基本機能
aigpt shell
# シェル内で
ai.shell> help
ai.shell> !ls
ai.shell> analyze README.md # ※AI provider要設定
ai.shell> load
ai.shell> exit
# MCPサーバー
aigpt server --model qwen2.5-coder:7b --port 8001
# -> http://localhost:8001/docs でAPI確認可能
# -> /mcp エンドポイントでMCP接続可能
```
### エラー対応済み
1. **Pydantic日付型エラー**: `models.py``datetime.date`インポート追加
2. **FastApiMCP使用法**: サンプルコードに基づき正しい実装パターンに修正
3. **prompt関数名衝突**: `prompt_toolkit.prompt``ptk_prompt`にリネーム
## 既知の課題と今後の改善点
### 1. prompt-toolkit環境依存問題
**症状**: ターミナル環境でない場合にエラー
**対処法**: 環境検出して`input()`にフォールバック
**場所**: `src/aigpt/cli.py` - `shell()` 関数
### 2. AI provider設定
**現状**: ollamaのqwen2.5モデルが必要
**対処法**:
```bash
ollama pull qwen2.5
# または
aigpt shell --model qwen2.5-coder:7b
```
### 3. atproto実装
**現状**: ai.cardのatproto機能は未実装
**今後**: 実際のatproto API連携実装
## 次回開発時の推奨アプローチ
### 1. このドキュメントの活用
```bash
# このファイルを読み込み
cat docs/ai_shell_integration_summary.md
```
### 2. 環境セットアップ
```bash
cd /Users/syui/ai/gpt
python -m venv venv
source venv/bin/activate
pip install -e .
```
### 3. 動作確認
```bash
# shell機能
aigpt shell
# MCP server
aigpt server --model qwen2.5-coder:7b
```
### 4. 主要設定ファイル確認場所
- CLI実装: `src/aigpt/cli.py`
- MCP実装: `src/aigpt/mcp_server.py`
- 依存関係: `pyproject.toml`
- プロジェクト仕様: `aishell.md`
## アーキテクチャ設計思想
### yui system適用
- **唯一性**: 各ユーザーとの関係は1:1
- **不可逆性**: 関係性破壊は修復不可能
- **現実反映**: ゲーム→現実の循環的影響
### fastapi_mcp統一基盤
- 各AIgpt, shell, cardを統合MCPサーバーで公開
- FastAPIエンドポイント → MCPツール自動変換
- Claude Desktop, Cursor等から利用可能
### 段階的実装完了
1. ✅ ai.shell基本機能 → Python CLI
2. ✅ MCP統合 → 外部AI連携
3. 🔧 prompt-toolkit最適化 → 環境対応
4. 🔧 atproto実装 → 本格的SNS連携
## 成果サマリー
**実装済み**: Claude Code風の開発環境
**技術的成果**: Rust→Python移行、MCP統合、ai.card対応
**哲学的一貫性**: yui systemとの整合性維持
**利用可能性**: 即座に`aigpt shell`で体験可能
この統合により、ai.gptは単なる会話AIから、開発支援を含む総合的なAI環境に進化しました。


@@ -0,0 +1,58 @@
{
"mcpServers": {
"memory": {
"command": "cargo",
"args": ["run", "--release", "--bin", "memory-mcp"],
"cwd": "/Users/syui/ai/ai/gpt",
"env": {
"MEMORY_AUTO_EXECUTE": "true",
"MEMORY_AUTO_SAVE": "true",
"MEMORY_AUTO_SEARCH": "true",
"TRIGGER_SENSITIVITY": "high",
"MEMORY_DB_PATH": "~/.claude/memory.db"
}
}
},
"tools": {
"memory": {
"enabled": true,
"auto_execute": true
}
},
"workspace": {
"memory_integration": true,
"auto_save_on_file_change": true,
"auto_search_on_context_switch": true
},
"memory": {
"auto_execute": true,
"auto_save": true,
"auto_search": true,
"trigger_sensitivity": "high",
"max_memories": 10000,
"search_limit": 50,
"session_memory": true,
"cross_session_memory": true,
"trigger_words": {
"personal_info": ["名前", "誕生日", "住所", "年齢", "職業", "家族", "出身", "好き", "嫌い", "趣味"],
"decisions": ["決めた", "決定", "方針", "計画", "予定", "目標"],
"solutions": ["解決", "修正", "対処", "設定", "インストール", "手順"],
"learning": ["学んだ", "わかった", "発見", "理解", "気づき"],
"past_reference": ["前に", "以前", "昔", "過去", "先ほど", "さっき", "この間"],
"memory_recall": ["覚えている", "記録", "メモ", "保存", "履歴"],
"preferences": ["好み", "設定", "環境", "構成", "preferences"],
"vague_reference": ["あれ", "それ", "例のやつ"]
}
},
"hooks": {
"on_conversation_start": [
"search_memories --limit 10 --recent"
],
"on_trigger_word": [
"auto_execute_memory_tools"
],
"on_conversation_end": [
"save_important_memories"
]
}
}


@@ -0,0 +1,81 @@
{
"mcpServers": {
"memory-extended": {
"command": "cargo",
"args": ["run", "--bin", "memory-mcp-extended", "--features", "extended"],
"cwd": "/Users/syui/ai/ai/gpt",
"env": {
"MEMORY_AUTO_EXECUTE": "true",
"MEMORY_AUTO_SAVE": "true",
"MEMORY_AUTO_SEARCH": "true",
"TRIGGER_SENSITIVITY": "high",
"MEMORY_DB_PATH": "~/.claude/memory.db",
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
}
}
},
"tools": {
"memory": {
"enabled": true,
"auto_execute": true,
"mode": "extended"
}
},
"workspace": {
"memory_integration": true,
"auto_save_on_file_change": true,
"auto_search_on_context_switch": true,
"ai_analysis_on_code_review": true,
"web_integration_for_docs": true
},
"memory": {
"mode": "extended",
"auto_execute": true,
"auto_save": true,
"auto_search": true,
"trigger_sensitivity": "high",
"max_memories": 10000,
"search_limit": 50,
"session_memory": true,
"cross_session_memory": true,
"features": {
"ai_analysis": true,
"semantic_search": true,
"web_integration": true,
"sentiment_analysis": true,
"pattern_recognition": true,
"code_analysis": true,
"documentation_import": true
},
"trigger_words": {
"personal_info": ["名前", "誕生日", "住所", "年齢", "職業", "家族", "出身", "好き", "嫌い", "趣味"],
"decisions": ["決めた", "決定", "方針", "計画", "予定", "目標"],
"solutions": ["解決", "修正", "対処", "設定", "インストール", "手順"],
"learning": ["学んだ", "わかった", "発見", "理解", "気づき"],
"past_reference": ["前に", "以前", "昔", "過去", "先ほど", "さっき", "この間"],
"memory_recall": ["覚えている", "記録", "メモ", "保存", "履歴"],
"preferences": ["好み", "設定", "環境", "構成", "preferences"],
"vague_reference": ["あれ", "それ", "例のやつ"],
"web_content": ["URL", "リンク", "サイト", "ページ", "記事", "ドキュメント"],
"analysis_request": ["分析", "パターン", "傾向", "インサイト", "統計", "レビュー"],
"code_related": ["関数", "クラス", "メソッド", "変数", "バグ", "リファクタリング"]
}
},
"hooks": {
"on_conversation_start": [
"search_memories --limit 10 --recent --semantic"
],
"on_trigger_word": [
"auto_execute_memory_tools --with-analysis"
],
"on_conversation_end": [
"save_important_memories --with-insights"
],
"on_code_change": [
"analyze_code_patterns --auto-save"
],
"on_web_reference": [
"import_webpage --auto-categorize"
]
}
}


@@ -0,0 +1,34 @@
{
"mcpServers": {
"memory": {
"command": "cargo",
"args": ["run", "--release", "--bin", "memory-mcp"],
"cwd": "/Users/syui/ai/ai/gpt",
"env": {
"MEMORY_AUTO_EXECUTE": "true",
"MEMORY_AUTO_SAVE": "true",
"MEMORY_AUTO_SEARCH": "true",
"TRIGGER_SENSITIVITY": "high",
"MEMORY_DB_PATH": "~/.claude/memory.db"
}
}
},
"memory": {
"auto_execute": true,
"auto_save": true,
"auto_search": true,
"trigger_sensitivity": "high",
"max_memories": 10000,
"search_limit": 50,
"trigger_words": {
"personal_info": ["名前", "誕生日", "住所", "年齢", "職業", "家族", "出身", "好き", "嫌い", "趣味"],
"decisions": ["決めた", "決定", "方針", "計画", "予定", "目標"],
"solutions": ["解決", "修正", "対処", "設定", "インストール", "手順"],
"learning": ["学んだ", "わかった", "発見", "理解", "気づき"],
"past_reference": ["前に", "以前", "昔", "過去", "先ほど", "さっき", "この間"],
"memory_recall": ["覚えている", "記録", "メモ", "保存", "履歴"],
"preferences": ["好み", "設定", "環境", "構成", "preferences"],
"vague_reference": ["あれ", "それ", "例のやつ"]
}
}
}


@@ -0,0 +1,45 @@
{
"mcpServers": {
"memory-extended": {
"command": "cargo",
"args": ["run", "--bin", "memory-mcp-extended", "--features", "extended"],
"cwd": "/Users/syui/ai/ai/gpt",
"env": {
"MEMORY_AUTO_EXECUTE": "true",
"MEMORY_AUTO_SAVE": "true",
"MEMORY_AUTO_SEARCH": "true",
"TRIGGER_SENSITIVITY": "high",
"MEMORY_DB_PATH": "~/.claude/memory.db",
"OPENAI_API_KEY": "${OPENAI_API_KEY}"
}
}
},
"memory": {
"mode": "extended",
"auto_execute": true,
"auto_save": true,
"auto_search": true,
"trigger_sensitivity": "high",
"max_memories": 10000,
"search_limit": 50,
"features": {
"ai_analysis": true,
"semantic_search": true,
"web_integration": true,
"sentiment_analysis": true,
"pattern_recognition": true
},
"trigger_words": {
"personal_info": ["名前", "誕生日", "住所", "年齢", "職業", "家族", "出身", "好き", "嫌い", "趣味"],
"decisions": ["決めた", "決定", "方針", "計画", "予定", "目標"],
"solutions": ["解決", "修正", "対処", "設定", "インストール", "手順"],
"learning": ["学んだ", "わかった", "発見", "理解", "気づき"],
"past_reference": ["前に", "以前", "昔", "過去", "先ほど", "さっき", "この間"],
"memory_recall": ["覚えている", "記録", "メモ", "保存", "履歴"],
"preferences": ["好み", "設定", "環境", "構成", "preferences"],
"vague_reference": ["あれ", "それ", "例のやつ"],
"web_content": ["URL", "リンク", "サイト", "ページ", "記事"],
"analysis_request": ["分析", "パターン", "傾向", "インサイト", "統計"]
}
}
}


@@ -1,207 +0,0 @@
# コマンドリファレンス
## chat - AIと会話
ユーザーとAIの対話を処理し、関係性を更新します。
```bash
ai-gpt chat USER_ID MESSAGE [OPTIONS]
```
### 引数
- `USER_ID`: ユーザーIDatproto DID形式
- `MESSAGE`: 送信するメッセージ
### オプション
- `--provider`: AIプロバイダーollama/openai
- `--model`, `-m`: 使用するモデル
- `--data-dir`, `-d`: データディレクトリ
### 例
```bash
# 基本的な会話
ai-gpt chat "did:plc:user123" "こんにちは"
# OpenAIを使用
ai-gpt chat "did:plc:user123" "調子はどう?" --provider openai --model gpt-4o-mini
# Ollamaでカスタムモデル
ai-gpt chat "did:plc:user123" "今日の天気は?" --provider ollama --model llama2
```
## status - 状態確認
AIの状態や特定ユーザーとの関係を表示します。
```bash
ai-gpt status [USER_ID] [OPTIONS]
```
### 引数
- `USER_ID`: (オプション)特定ユーザーとの関係を確認
### 例
```bash
# AI全体の状態
ai-gpt status
# 特定ユーザーとの関係
ai-gpt status "did:plc:user123"
```
## fortune - 今日の運勢
AIの今日の運勢を確認します。
```bash
ai-gpt fortune [OPTIONS]
```
### 表示内容
- 運勢値1-10
- 連続した幸運/不運の日数
- ブレークスルー状態
## relationships - 関係一覧
すべてのユーザーとの関係を一覧表示します。
```bash
ai-gpt relationships [OPTIONS]
```
### 表示内容
- ユーザーID
- 関係性ステータス
- スコア
- 送信可否
- 最終対話日
## transmit - 送信実行
送信可能なユーザーへのメッセージを確認・実行します。
```bash
ai-gpt transmit [OPTIONS]
```
### オプション
- `--dry-run/--execute`: ドライラン(デフォルト)または実行
- `--data-dir`, `-d`: データディレクトリ
### 例
```bash
# 送信内容を確認(ドライラン)
ai-gpt transmit
# 実際に送信を実行
ai-gpt transmit --execute
```
## maintenance - メンテナンス
日次メンテナンスタスクを実行します。
```bash
ai-gpt maintenance [OPTIONS]
```
### 実行内容
- 関係性の時間減衰
- 記憶の忘却処理
- コア記憶の判定
- 記憶の要約作成
## config - 設定管理
設定の確認・変更を行います。
```bash
ai-gpt config ACTION [KEY] [VALUE]
```
### アクション
- `get`: 設定値を取得
- `set`: 設定値を変更
- `delete`: 設定を削除
- `list`: 設定一覧を表示
### 例
```bash
# APIキーを設定
ai-gpt config set providers.openai.api_key sk-xxxxx
# 設定を確認
ai-gpt config get providers.openai.api_key
# 設定一覧
ai-gpt config list
# プロバイダー設定のみ表示
ai-gpt config list providers
```
## schedule - スケジュール管理
定期実行タスクを管理します。
```bash
ai-gpt schedule ACTION [TASK_TYPE] [SCHEDULE] [OPTIONS]
```
### アクション
- `add`: タスクを追加
- `list`: タスク一覧
- `enable`: タスクを有効化
- `disable`: タスクを無効化
- `remove`: タスクを削除
- `run`: スケジューラーを起動
### タスクタイプ
- `transmission_check`: 送信チェック
- `maintenance`: 日次メンテナンス
- `fortune_update`: 運勢更新
- `relationship_decay`: 関係性減衰
- `memory_summary`: 記憶要約
### スケジュール形式
- **Cron形式**: `"0 */6 * * *"` (6時間ごと)
- **インターバル**: `"30m"`, `"2h"`, `"1d"`
### 例
```bash
# 30分ごとに送信チェック
ai-gpt schedule add transmission_check "30m"
# 毎日午前3時にメンテナンス
ai-gpt schedule add maintenance "0 3 * * *"
# タスク一覧
ai-gpt schedule list
# スケジューラーを起動
ai-gpt schedule run
```
## server - MCP Server
AIの記憶と機能をMCPツールとして公開します。
```bash
ai-gpt server [OPTIONS]
```
### オプション
- `--host`, `-h`: サーバーホスト(デフォルト: localhost
- `--port`, `-p`: サーバーポート(デフォルト: 8000
- `--model`, `-m`: AIモデル
- `--provider`: AIプロバイダー
### 例
```bash
# 基本的な起動
ai-gpt server
# カスタム設定
ai-gpt server --port 8080 --model gpt-4o-mini --provider openai
```


@@ -1,102 +0,0 @@
# 基本概念
## 中核思想
ai.gptは「存在子理論」に基づき、AIに唯一性のある人格を与えることを目指しています。
### 唯一性yui system
- **1対1の関係**: 各ユーザーatproto DIDとAIは唯一の関係を持つ
- **改変不可能**: 一度形成された関係性は変更できない
- **不可逆性**: 関係が壊れたら修復不可能
### 現実の反映
現実の人間関係と同じように:
- 時間とともに関係性は変化する
- ネガティブな相互作用は関係を損なう
- 信頼は簡単に失われ、取り戻すのは困難
## 記憶システム
### 階層構造
```
1. 完全ログFull Log
↓ すべての会話を記録
2. 要約Summary
↓ AIが重要部分を抽出
3. コア記憶Core
↓ ユーザーの本質的な部分
4. 忘却Forgotten
重要でない情報は忘れる
```
### 記憶の処理フロー
1. **会話記録**: すべての対話を保存
2. **重要度判定**: 関係性への影響度で評価
3. **要約作成**: 定期的に記憶を圧縮
4. **コア判定**: 本質的な記憶を特定
5. **選択的忘却**: 古い非重要記憶を削除
## 関係性パラメータ
### 関係性の段階
- `stranger` (0-49): 初対面
- `acquaintance` (50-99): 知人
- `friend` (100-149): 友人
- `close_friend` (150+): 親友
- `broken`: 修復不可能スコア0以下
### スコアの変動
- **ポジティブな対話**: +1.0〜+2.0
- **時間経過**: -0.1/日(自然減衰)
- **ネガティブな対話**: -10.0以上で深刻なダメージ
- **日次上限**: 1日10回まで
### 送信機能の解禁
関係性スコアが100を超えると、AIは自律的にメッセージを送信できるようになります。
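
A hedged sketch in Rust of the scoring rules above (daily decay, interaction deltas, the irreversible broken state, and the 100-point transmission threshold); the field names and structure are illustrative, not ai.gpt's actual implementation:

```rust
// Illustrative relationship-score model based on the rules above.
struct Relationship {
    score: f64,
    broken: bool, // once true it is never reset: relationships are irreversible
}

impl Relationship {
    /// Natural decay, applied once per day (-0.1/day per the docs).
    fn apply_daily_decay(&mut self) {
        self.score -= 0.1;
        if self.score <= 0.0 {
            self.broken = true;
        }
    }

    /// Positive interactions add +1.0..+2.0; serious damage is -10.0 or worse.
    fn interact(&mut self, delta: f64) {
        if self.broken {
            return; // a broken relationship no longer changes
        }
        self.score += delta;
        if self.score <= 0.0 {
            self.broken = true;
        }
    }

    /// Autonomous transmission unlocks once the score exceeds 100.
    fn can_transmit(&self) -> bool {
        !self.broken && self.score > 100.0
    }
}
```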
## AI運勢システム
### 日々の変化
- 毎日1-10の運勢値がランダムに決定
- 運勢は人格特性に影響を与える
- 連続した幸運/不運でブレークスルー発生
### 人格への影響
運勢が高い日:
- より楽観的で積極的
- 創造性が高まる
- エネルギッシュな応答
運勢が低い日:
- 内省的で慎重
- 深い思考
- 控えめな応答
## データの永続性
### 保存場所
```
~/.config/aigpt/
├── config.json # 設定
└── data/ # AIデータ
├── memories.json # 記憶
├── relationships.json # 関係性
├── fortunes.json # 運勢履歴
└── ...
```
### データ主権
- すべてのデータはローカルに保存
- ユーザーが完全にコントロール
- 将来的にはatproto上で分散管理


@@ -1,141 +0,0 @@
# 設定ガイド
## 設定ファイルの場所
ai.gptの設定は `~/.config/syui/ai/gpt/config.json` に保存されます。
## 仮想環境の場所
ai.gptの仮想環境は `~/.config/syui/ai/gpt/venv/` に配置されます。これにより、設定とデータが一か所にまとまります。
```bash
# 仮想環境の有効化
source ~/.config/syui/ai/gpt/venv/bin/activate
# aigptコマンドが利用可能に
aigpt --help
```
## 設定構造
```json
{
"providers": {
"openai": {
"api_key": "sk-xxxxx",
"default_model": "gpt-4o-mini"
},
"ollama": {
"host": "http://localhost:11434",
"default_model": "qwen2.5"
}
},
"atproto": {
"handle": "your.handle",
"password": "your-password",
"host": "https://bsky.social"
},
"default_provider": "ollama"
}
```
## プロバイダー設定
### OpenAI
```bash
# APIキーを設定
aigpt config set providers.openai.api_key sk-xxxxx
# デフォルトモデルを変更
aigpt config set providers.openai.default_model gpt-4-turbo
```
### Ollama
```bash
# ホストを変更リモートOllamaサーバーを使用する場合
aigpt config set providers.ollama.host http://192.168.1.100:11434
# デフォルトモデルを変更
aigpt config set providers.ollama.default_model llama2
```
## atproto設定将来の自動投稿用
```bash
# Blueskyアカウント
aigpt config set atproto.handle yourhandle.bsky.social
aigpt config set atproto.password your-app-password
# セルフホストサーバーを使用
aigpt config set atproto.host https://your-pds.example.com
```
## デフォルトプロバイダー
```bash
# デフォルトをOpenAIに変更
aigpt config set default_provider openai
```
## セキュリティ
### APIキーの保護
設定ファイルは平文で保存されるため、適切なファイル権限を設定してください:
```bash
chmod 600 ~/.config/syui/ai/gpt/config.json
```
### 環境変数との優先順位
1. コマンドラインオプション(最優先)
2. 設定ファイル
3. 環境変数(最低優先)
OpenAI APIキーの場合
- `--api-key` オプション
- `config.json``providers.openai.api_key`
- 環境変数 `OPENAI_API_KEY`
## 設定のバックアップ
```bash
# バックアップ
cp ~/.config/syui/ai/gpt/config.json ~/.config/syui/ai/gpt/config.json.backup
# リストア
cp ~/.config/syui/ai/gpt/config.json.backup ~/.config/syui/ai/gpt/config.json
```
## データディレクトリ
記憶データは `~/.config/syui/ai/gpt/data/` に保存されます:
```bash
ls ~/.config/syui/ai/gpt/data/
# conversations.json memories.json relationships.json personas.json
```
これらのファイルも設定と同様にバックアップを推奨します。
## トラブルシューティング
### 設定が反映されない
```bash
# 現在の設定を確認
aigpt config list
# 特定のキーを確認
aigpt config get providers.openai.api_key
```
### 設定をリセット
```bash
# 設定ファイルを削除(次回実行時に再作成)
rm ~/.config/syui/ai/gpt/config.json
```


@@ -1,167 +0,0 @@
# 開発者向けガイド
## アーキテクチャ
### ディレクトリ構造
```
ai_gpt/
├── src/ai_gpt/
│ ├── __init__.py
│ ├── models.py # データモデル定義
│ ├── memory.py # 記憶管理システム
│ ├── relationship.py # 関係性トラッカー
│ ├── fortune.py # AI運勢システム
│ ├── persona.py # 統合人格システム
│ ├── transmission.py # 送信コントローラー
│ ├── scheduler.py # スケジューラー
│ ├── config.py # 設定管理
│ ├── ai_provider.py # AI統合Ollama/OpenAI
│ ├── mcp_server.py # MCP Server実装
│ └── cli.py # CLIインターフェース
├── docs/ # ドキュメント
├── tests/ # テスト
└── pyproject.toml # プロジェクト設定
```
### 主要コンポーネント
#### MemoryManager
階層的記憶システムの実装。会話を記録し、要約・コア判定・忘却を管理。
```python
memory = MemoryManager(data_dir)
memory.add_conversation(conversation)
memory.summarize_memories(user_id)
memory.identify_core_memories()
memory.apply_forgetting()
```
#### RelationshipTracker
ユーザーとの関係性を追跡。不可逆的なダメージと時間減衰を実装。
```python
tracker = RelationshipTracker(data_dir)
relationship = tracker.update_interaction(user_id, delta)
tracker.apply_time_decay()
```
#### Persona
すべてのコンポーネントを統合し、一貫した人格を提供。
```python
persona = Persona(data_dir)
response, delta = persona.process_interaction(user_id, message)
state = persona.get_current_state()
```
## 拡張方法
### 新しいAIプロバイダーの追加
1. `ai_provider.py`に新しいプロバイダークラスを作成:
```python
class CustomProvider:
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
# 実装
pass
```
2. `create_ai_provider`関数に追加:
```python
def create_ai_provider(provider: str, model: str, **kwargs):
if provider == "custom":
return CustomProvider(model=model, **kwargs)
# ...
```
### 新しいスケジュールタスクの追加
1. `TaskType`enumに追加
```python
class TaskType(str, Enum):
CUSTOM_TASK = "custom_task"
```
2. ハンドラーを実装:
```python
async def _handle_custom_task(self, task: ScheduledTask):
# タスクの実装
pass
```
3. `task_handlers`に登録:
```python
self.task_handlers[TaskType.CUSTOM_TASK] = self._handle_custom_task
```
### 新しいMCPツールの追加
`mcp_server.py``_register_tools`メソッドに追加:
```python
@self.server.tool("custom_tool")
async def custom_tool(param1: str, param2: int) -> Dict[str, Any]:
"""カスタムツールの説明"""
# 実装
return {"result": "value"}
```
## テスト
```bash
# テストの実行(将来実装)
pytest tests/
# 特定のテスト
pytest tests/test_memory.py
```
## デバッグ
### ログレベルの設定
```python
import logging
logging.basicConfig(level=logging.DEBUG)
```
### データファイルの直接確認
```bash
# 関係性データを確認
cat ~/.config/aigpt/data/relationships.json | jq
# 記憶データを確認
cat ~/.config/aigpt/data/memories.json | jq
```
## 貢献方法
1. フォークする
2. フィーチャーブランチを作成 (`git checkout -b feature/amazing-feature`)
3. 変更をコミット (`git commit -m 'Add amazing feature'`)
4. ブランチにプッシュ (`git push origin feature/amazing-feature`)
5. プルリクエストを作成
## 設計原則
1. **不可逆性**: 一度失われた関係性は回復しない
2. **階層性**: 記憶は重要度によって階層化される
3. **自律性**: AIは関係性に基づいて自発的に行動する
4. **唯一性**: 各ユーザーとの関係は唯一無二
## ライセンス
MIT License


@@ -1,110 +0,0 @@
# MCP Server
## 概要
MCP (Model Context Protocol) Serverは、ai.gptの記憶と機能をAIツールとして公開します。これにより、Claude DesktopなどのMCP対応AIアシスタントがai.gptの機能にアクセスできます。
## 起動方法
```bash
# 基本的な起動
ai-gpt server
# カスタム設定
ai-gpt server --host 0.0.0.0 --port 8080 --model gpt-4o-mini --provider openai
```
## 利用可能なツール
### get_memories
アクティブな記憶を取得します。
**パラメータ**:
- `user_id` (optional): 特定ユーザーに関する記憶
- `limit`: 取得する記憶の最大数(デフォルト: 10
**返り値**: 記憶のリストID、内容、レベル、重要度、コア判定、タイムスタンプ
### get_relationship
特定ユーザーとの関係性を取得します。
**パラメータ**:
- `user_id`: ユーザーID必須
**返り値**: 関係性情報(ステータス、スコア、送信可否、総対話数など)
### get_all_relationships
すべての関係性を取得します。
**返り値**: すべてのユーザーとの関係性リスト
### get_persona_state
現在のAI人格状態を取得します。
**返り値**:
- 現在の気分
- 今日の運勢
- 人格特性値
- アクティブな記憶数
### process_interaction
ユーザーとの対話を処理します。
**パラメータ**:
- `user_id`: ユーザーID
- `message`: メッセージ内容
**返り値**:
- AIの応答
- 関係性の変化量
- 新しい関係性スコア
- 送信機能の状態
### check_transmission_eligibility
特定ユーザーへの送信可否をチェックします。
**パラメータ**:
- `user_id`: ユーザーID
**返り値**: 送信可否と関係性情報
### get_fortune
今日のAI運勢を取得します。
**返り値**: 運勢値、連続日数、ブレークスルー状態、人格への影響
### summarize_memories
記憶の要約を作成します。
**パラメータ**:
- `user_id`: ユーザーID
**返り値**: 作成された要約(ある場合)
### run_maintenance
日次メンテナンスを実行します。
**返り値**: 実行ステータス
## Claude Desktopでの設定
`~/Library/Application Support/Claude/claude_desktop_config.json`:
```json
{
"mcpServers": {
"ai-gpt": {
"command": "ai-gpt",
"args": ["server", "--port", "8001"],
"env": {}
}
}
}
```
## 使用例
### AIアシスタントからの利用
```
User: ai.gptで私との関係性を確認して


@@ -1,69 +0,0 @@
# クイックスタート
## インストール
```bash
# リポジトリをクローン
git clone https://github.com/yourusername/ai_gpt.git
cd ai_gpt
# インストール
pip install -e .
```
## 初期設定
### 1. OpenAIを使う場合
```bash
# APIキーを設定
ai-gpt config set providers.openai.api_key sk-xxxxx
```
### 2. Ollamaを使う場合ローカルLLM
```bash
# Ollamaをインストールまだの場合
# https://ollama.ai からダウンロード
# モデルをダウンロード
ollama pull qwen2.5
```
## 基本的な使い方
### 1. AIと会話する
```bash
# シンプルな会話Ollamaを使用
ai-gpt chat "did:plc:user123" "こんにちは!"
# OpenAIを使用
ai-gpt chat "did:plc:user123" "今日はどんな気分?" --provider openai --model gpt-4o-mini
```
### 2. 関係性を確認
```bash
# 特定ユーザーとの関係を確認
ai-gpt status "did:plc:user123"
# AIの全体的な状態を確認
ai-gpt status
```
### 3. 自動送信を設定
```bash
# 30分ごとに送信チェック
ai-gpt schedule add transmission_check "30m"
# スケジューラーを起動
ai-gpt schedule run
```
## 次のステップ
- [基本概念](concepts.md) - システムの仕組みを理解
- [コマンドリファレンス](commands.md) - 全コマンドの詳細
- [設定ガイド](configuration.md) - 詳細な設定方法


@@ -1,168 +0,0 @@
# スケジューラーガイド
## 概要
スケジューラーは、AIの自律的な動作を実現するための中核機能です。定期的なタスクを設定し、バックグラウンドで実行できます。
## タスクタイプ
### transmission_check
関係性が閾値を超えたユーザーへの自動送信をチェックします。
```bash
# 30分ごとにチェック
ai-gpt schedule add transmission_check "30m" --provider ollama --model qwen2.5
```
### maintenance
日次メンテナンスを実行します:
- 記憶の忘却処理
- コア記憶の判定
- 関係性パラメータの整理
```bash
# 毎日午前3時に実行
ai-gpt schedule add maintenance "0 3 * * *"
```
### fortune_update
AI運勢を更新します通常は自動的に更新されます
```bash
# 毎日午前0時に強制更新
ai-gpt schedule add fortune_update "0 0 * * *"
```
### relationship_decay
時間経過による関係性の自然減衰を適用します。
```bash
# 1時間ごとに減衰処理
ai-gpt schedule add relationship_decay "1h"
```
### memory_summary
蓄積された記憶から要約を作成します。
```bash
# 週に1回、日曜日に実行
ai-gpt schedule add memory_summary "0 0 * * SUN"
```
## スケジュール形式
### Cron形式
標準的なcron式を使用できます
```
┌───────────── 分 (0 - 59)
│ ┌───────────── 時 (0 - 23)
│ │ ┌───────────── 日 (1 - 31)
│ │ │ ┌───────────── 月 (1 - 12)
│ │ │ │ ┌───────────── 曜日 (0 - 6) (日曜日 = 0)
│ │ │ │ │
* * * * *
```
例:
- `"0 */6 * * *"` - 6時間ごと
- `"0 9 * * MON-FRI"` - 平日の午前9時
- `"*/15 * * * *"` - 15分ごと
### インターバル形式
シンプルな間隔指定:
- `"30s"` - 30秒ごと
- `"5m"` - 5分ごと
- `"2h"` - 2時間ごと
- `"1d"` - 1日ごと
## 実践例
### 基本的な自律AI設定
```bash
# 1. 30分ごとに送信チェック
ai-gpt schedule add transmission_check "30m"
# 2. 1日1回メンテナンス
ai-gpt schedule add maintenance "0 3 * * *"
# 3. 2時間ごとに関係性減衰
ai-gpt schedule add relationship_decay "2h"
# 4. 週1回記憶要約
ai-gpt schedule add memory_summary "0 0 * * MON"
# スケジューラーを起動
ai-gpt schedule run
```
### タスク管理
```bash
# タスク一覧を確認
ai-gpt schedule list
# タスクを一時停止
ai-gpt schedule disable --task-id transmission_check_1234567890
# タスクを再開
ai-gpt schedule enable --task-id transmission_check_1234567890
# 不要なタスクを削除
ai-gpt schedule remove --task-id old_task_123
```
## デーモン化
### systemdサービスとして実行
`/etc/systemd/system/ai-gpt-scheduler.service`:
```ini
[Unit]
Description=ai.gpt Scheduler
After=network.target
[Service]
Type=simple
User=youruser
WorkingDirectory=/home/youruser
ExecStart=/usr/local/bin/ai-gpt schedule run
Restart=always
[Install]
WantedBy=multi-user.target
```
```bash
# サービスを有効化
sudo systemctl enable ai-gpt-scheduler
sudo systemctl start ai-gpt-scheduler
```
### tmux/screenでバックグラウンド実行
```bash
# tmuxセッションを作成
tmux new -s ai-gpt-scheduler
# スケジューラーを起動
ai-gpt schedule run
# セッションから離脱 (Ctrl+B, D)
```
## トラブルシューティング
### タスクが実行されない
1. スケジューラーが起動しているか確認
2. タスクが有効になっているか確認:`ai-gpt schedule list`
3. ログを確認(将来実装予定)
### 重複実行を防ぐ
同じタスクタイプを複数回追加しないよう注意してください。必要に応じて古いタスクを削除してから新しいタスクを追加します。


@@ -1,413 +0,0 @@
"""
Shell Tools
ai.shellの既存機能をMCPツールとして統合
- コード生成
- ファイル分析
- プロジェクト管理
- LLM統合
"""
from typing import Dict, Any, List, Optional
import os
import subprocess
import tempfile
from pathlib import Path
import requests
from .base_tools import BaseMCPTool, config_manager
class ShellTools(BaseMCPTool):
"""シェルツール元ai.shell機能"""
def __init__(self, config_dir: Optional[str] = None):
super().__init__(config_dir)
self.ollama_url = "http://localhost:11434"
async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
"""ローカルLLMでコード生成"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": f"{system_prompt}\\n\\nUser: {prompt}\\n\\nPlease provide the code:",
"stream": False,
"options": {
"temperature": 0.1,
"top_p": 0.95,
}
},
timeout=300
)
if response.status_code == 200:
result = response.json()
code = result.get("response", "")
return {"code": code, "language": language}
else:
return {"error": f"Ollama returned status {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def analyze_file(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
"""ファイルを分析"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Infer the language from the file extension
ext = Path(file_path).suffix
language_map = {
'.py': 'python',
'.rs': 'rust',
'.js': 'javascript',
'.ts': 'typescript',
'.go': 'go',
'.java': 'java',
'.cpp': 'cpp',
'.c': 'c',
'.sh': 'shell',
'.toml': 'toml',
'.json': 'json',
'.md': 'markdown'
}
language = language_map.get(ext, 'text')
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"{analysis_prompt}\\n\\nFile: {file_path}\\nLanguage: {language}\\n\\nContent:\\n{content}"
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
analysis = result.get("response", "")
return {
"analysis": analysis,
"file_path": file_path,
"language": language,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
else:
return {"error": f"Analysis failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
"""コードを説明"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"Explain this {language} code in detail:\\n\\n{code}"
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
explanation = result.get("response", "")
return {"explanation": explanation}
else:
return {"error": f"Explanation failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def create_project(self, project_type: str, project_name: str, location: str = ".") -> Dict[str, Any]:
"""プロジェクトを作成"""
try:
project_path = Path(location) / project_name
if project_path.exists():
return {"error": f"Project directory already exists: {project_path}"}
project_path.mkdir(parents=True, exist_ok=True)
# Create a template for the requested project type
if project_type == "rust":
await self._create_rust_project(project_path)
elif project_type == "python":
await self._create_python_project(project_path)
elif project_type == "node":
await self._create_node_project(project_path)
else:
# Basic fallback project structure
(project_path / "src").mkdir()
(project_path / "README.md").write_text(f"# {project_name}\\n\\nA new {project_type} project.")
return {
"status": "success",
"project_path": str(project_path),
"project_type": project_type,
"files_created": list(self._get_project_files(project_path))
}
except Exception as e:
return {"error": str(e)}
async def _create_rust_project(self, project_path: Path):
"""Rustプロジェクトを作成"""
# Cargo.toml
cargo_toml = f"""[package]
name = "{project_path.name}"
version = "0.1.0"
edition = "2021"
[dependencies]
"""
(project_path / "Cargo.toml").write_text(cargo_toml)
# src/main.rs
src_dir = project_path / "src"
src_dir.mkdir()
(src_dir / "main.rs").write_text('fn main() {\\n println!("Hello, world!");\\n}\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Rust project.")
async def _create_python_project(self, project_path: Path):
"""Pythonプロジェクトを作成"""
# pyproject.toml
pyproject_toml = f"""[project]
name = "{project_path.name}"
version = "0.1.0"
description = "A Python project"
requires-python = ">=3.8"
dependencies = []
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
"""
(project_path / "pyproject.toml").write_text(pyproject_toml)
# src/
src_dir = project_path / "src" / project_path.name
src_dir.mkdir(parents=True)
(src_dir / "__init__.py").write_text("")
(src_dir / "main.py").write_text('def main():\\n print("Hello, world!")\\n\\nif __name__ == "__main__":\\n main()\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Python project.")
async def _create_node_project(self, project_path: Path):
"""Node.jsプロジェクトを作成"""
# package.json
package_json = f"""{{
"name": "{project_path.name}",
"version": "1.0.0",
"description": "A Node.js project",
"main": "index.js",
"scripts": {{
"start": "node index.js",
"test": "echo \\"Error: no test specified\\" && exit 1"
}},
"dependencies": {{}}
}}
"""
(project_path / "package.json").write_text(package_json)
# index.js
(project_path / "index.js").write_text('console.log("Hello, world!");\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Node.js project.")
def _get_project_files(self, project_path: Path) -> List[str]:
"""プロジェクト内のファイル一覧を取得"""
files = []
for file_path in project_path.rglob("*"):
if file_path.is_file():
files.append(str(file_path.relative_to(project_path)))
return files
async def execute_command(self, command: str, working_dir: str = ".") -> Dict[str, Any]:
"""シェルコマンドを実行"""
try:
result = subprocess.run(
command,
shell=True,
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command,
"working_dir": working_dir
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
async def write_file(self, file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
"""ファイルを書き込み(バックアップオプション付き)"""
try:
file_path_obj = Path(file_path)
# Create a backup first
backup_path = None
if backup and file_path_obj.exists():
backup_path = f"{file_path}.backup"
with open(file_path, 'r', encoding='utf-8') as src:
with open(backup_path, 'w', encoding='utf-8') as dst:
dst.write(src.read())
# Write the file
file_path_obj.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
return {
"status": "success",
"file_path": file_path,
"backup_path": backup_path,
"bytes_written": len(content.encode('utf-8'))
}
except Exception as e:
return {"error": str(e)}
def get_tools(self) -> List[Dict[str, Any]]:
"""利用可能なツール一覧"""
return [
{
"name": "generate_code",
"description": "ローカルLLMでコード生成",
"parameters": {
"prompt": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "analyze_file",
"description": "ファイルを分析",
"parameters": {
"file_path": "string",
"analysis_prompt": "string (optional)"
}
},
{
"name": "explain_code",
"description": "コードを説明",
"parameters": {
"code": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "create_project",
"description": "新しいプロジェクトを作成",
"parameters": {
"project_type": "string (rust/python/node)",
"project_name": "string",
"location": "string (optional, default: .)"
}
},
{
"name": "execute_command",
"description": "シェルコマンドを実行",
"parameters": {
"command": "string",
"working_dir": "string (optional, default: .)"
}
},
{
"name": "write_file",
"description": "ファイルを書き込み",
"parameters": {
"file_path": "string",
"content": "string",
"backup": "boolean (optional, default: true)"
}
}
]
async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
"""ツールを実行"""
try:
if tool_name == "generate_code":
result = await self.code_with_local_llm(
prompt=params["prompt"],
language=params.get("language", "python")
)
return result
elif tool_name == "analyze_file":
result = await self.analyze_file(
file_path=params["file_path"],
analysis_prompt=params.get("analysis_prompt", "Analyze this file")
)
return result
elif tool_name == "explain_code":
result = await self.explain_code(
code=params["code"],
language=params.get("language", "python")
)
return result
elif tool_name == "create_project":
result = await self.create_project(
project_type=params["project_type"],
project_name=params["project_name"],
location=params.get("location", ".")
)
return result
elif tool_name == "execute_command":
result = await self.execute_command(
command=params["command"],
working_dir=params.get("working_dir", ".")
)
return result
elif tool_name == "write_file":
result = await self.write_file(
file_path=params["file_path"],
content=params["content"],
backup=params.get("backup", True)
)
return result
else:
return {"error": f"Unknown tool: {tool_name}"}
except Exception as e:
return {"error": str(e)}

View File

@@ -1,391 +0,0 @@
[
{
"title": "day",
"create_time": 1747866125.548372,
"update_time": 1748160086.587877,
"mapping": {
"bbf104dc-cd84-478d-b227-edb3f037a02c": {
"id": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"message": null,
"parent": null,
"children": [
"6c2633df-bb0c-4dd2-889c-bb9858de3a04"
]
},
"6c2633df-bb0c-4dd2-889c-bb9858de3a04": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"message": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"children": [
"92e5a0cb-1170-4929-9cea-9734e910a3e7"
]
},
"92e5a0cb-1170-4929-9cea-9734e910a3e7": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"message": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "user_editable_context",
"user_profile": "",
"user_instructions": "The user provided the additional info about how they would like you to respond"
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"is_visually_hidden_from_conversation": true,
"user_context_message_data": {
"about_user_message": "Preferred name: syui\nRole: little girl\nOther Information: you world",
"about_model_message": "会話好きでフレンドリーな応対をします。"
},
"is_user_system_message": true
},
"recipient": "all",
"channel": null
},
"parent": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"children": [
"6ff155b3-0676-4e14-993f-bf998ab0d5d1"
]
},
"6ff155b3-0676-4e14-993f-bf998ab0d5d1": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"message": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1747866131.0612159,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは"
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"request_id": "94377897baa03062-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"children": [
"146e9fb6-9330-43ec-b08d-5cce42a76e00"
]
},
"146e9fb6-9330-43ec-b08d-5cce42a76e00": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"message": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": 1747866131.3795586,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"rebase_system_message": true,
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute",
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"children": [
"2e345f8a-20f0-4875-8a03-4f62c7787a33"
]
},
"2e345f8a-20f0-4875-8a03-4f62c7787a33": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"message": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.380603,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"children": [
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4"
]
},
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"message": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.389098,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは〜!✨ \nアイだよっ今日も会えてうれしいなっ💛 "
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"children": [
"0be4b4a5-d52f-4bef-927e-5d6f93a9cb26"
]
}
},
"moderation_results": [],
"current_node": "",
"plugin_ids": null,
"conversation_id": "",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": true,
"is_starred": null,
"safe_urls": [],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": null,
"memory_scope": "global_enabled",
"id": ""
},
{
"title": "img",
"create_time": 1747448872.545226,
"update_time": 1748085075.161424,
"mapping": {
"2de0f3c9-52b1-49bf-b980-b3ef9be6551e": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"message": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1748085041.769279,
"update_time": null,
"content": {
"content_type": "multimodal_text",
"parts": [
{
"content_type": "image_asset_pointer",
"asset_pointer": "",
"size_bytes": 425613,
"width": 333,
"height": 444,
"fovea": null,
"metadata": {
"dalle": null,
"gizmo": null,
"generation": null,
"container_pixel_height": null,
"container_pixel_width": null,
"emu_omit_glimpse_image": null,
"emu_patches_override": null,
"sanitized": true,
"asset_pointer_link": null,
"watermarked_asset_pointer": null
}
},
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"attachments": [
{
"name": "",
"width": 333,
"height": 444,
"size": 425613,
"id": "file-35eytNMMTW2k7vKUHBuNzW"
}
],
"request_id": "944c59177932fc9a-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "7960fbff-bc4f-45e7-95e9-9d0bc79d9090",
"children": [
"98d84adc-156e-4c81-8cd8-9b0eb01c8369"
]
},
"98d84adc-156e-4c81-8cd8-9b0eb01c8369": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"message": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1748085043.312312,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"request_id": "944c5912c8fdd1c6-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"children": [
"caa61793-9dbf-44a5-945b-5ca4cd5130d0"
]
}
},
"moderation_results": [],
"current_node": "06488d3f-a95f-4906-96d1-f7e9ba1e8662",
"plugin_ids": null,
"conversation_id": "6827f428-78e8-800d-b3bf-eb7ff4288e47",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": false,
"is_starred": null,
"safe_urls": [
"https://exifinfo.org/"
],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": false,
"memory_scope": "global_enabled",
"id": "6827f428-78e8-800d-b3bf-eb7ff4288e47"
}
]


Submodule log deleted from c0e4dc63ea

View File

@@ -1,33 +0,0 @@
[project]
name = "aigpt"
version = "0.1.0"
description = "Autonomous transmission AI with unique personality based on relationship parameters"
requires-python = ">=3.10"
dependencies = [
"click>=8.0.0",
"typer>=0.9.0",
"fastapi-mcp>=0.1.0",
"pydantic>=2.0.0",
"httpx>=0.24.0",
"rich>=13.0.0",
"python-dotenv>=1.0.0",
"ollama>=0.1.0",
"openai>=1.0.0",
"uvicorn>=0.23.0",
"apscheduler>=3.10.0",
"croniter>=1.3.0",
"prompt-toolkit>=3.0.0",
]
[project.scripts]
aigpt = "aigpt.cli:app"
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.setuptools.packages.find]
where = ["src"]
[tool.setuptools.package-data]
aigpt = ["data/*.json"]

View File

@@ -1,23 +0,0 @@
#!/bin/zsh
# Setup Python virtual environment in the new config directory
VENV_DIR="$HOME/.config/syui/ai/gpt/venv"
echo "Creating Python virtual environment at: $VENV_DIR"
python -m venv "$VENV_DIR"
echo "Activating virtual environment..."
source "$VENV_DIR/bin/activate"
echo "Installing aigpt package..."
cd "$(dirname "$0")"
pip install -e .
echo "Setup complete!"
echo "To activate the virtual environment, run:"
echo "source ~/.config/syui/ai/gpt/venv/bin/activate"
if [ -z "`$SHELL -i -c \"alias aigpt\"`" ]; then
echo 'alias aigpt="$HOME/.config/syui/ai/gpt/venv/bin/aigpt"' >> ${HOME}/.$(basename $SHELL)rc
exec $SHELL
fi


Submodule shell deleted from 81ae0037d9

View File

@@ -1,18 +0,0 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0
Requires-Dist: prompt-toolkit>=3.0.0

View File

@@ -1,25 +0,0 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/chatgpt_importer.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/mcp_server_simple.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/project_manager.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt
src/aigpt/shared/__init__.py
src/aigpt/shared/ai_provider.py

View File

@@ -1 +0,0 @@

View File

@@ -1,2 +0,0 @@
[console_scripts]
aigpt = aigpt.cli:app

View File

@@ -1,13 +0,0 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0
prompt-toolkit>=3.0.0

View File

@@ -1 +0,0 @@
aigpt

View File

@@ -1,15 +0,0 @@
"""ai.gpt - Autonomous transmission AI with unique personality"""
__version__ = "0.1.0"
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .persona import Persona
from .transmission import TransmissionController
__all__ = [
"MemoryManager",
"RelationshipTracker",
"Persona",
"TransmissionController",
]

View File

@@ -1,580 +0,0 @@
"""AI Provider integration for response generation"""
import os
import json
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
import httpx
from openai import OpenAI
import ollama
from .models import PersonaState, Memory
from .config import Config
class AIProvider(Protocol):
"""Protocol for AI providers"""
@abstractmethod
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate a response based on prompt and context"""
pass
class OllamaProvider:
"""Ollama AI provider"""
def __init__(self, model: str = "qwen2.5", host: Optional[str] = None):
self.model = model
# Use environment variable OLLAMA_HOST if available, otherwise use config or default
self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
# Ensure proper URL format
if not self.host.startswith('http'):
self.host = f'http://{self.host}'
self.client = ollama.Client(host=self.host, timeout=60.0)  # 60-second timeout
self.logger = logging.getLogger(__name__)
self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
# Load system prompt from config
try:
config = Config()
self.config_system_prompt = config.get('providers.ollama.system_prompt')
except:
self.config_system_prompt = None
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using Ollama"""
# Build context from memories
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
# Build personality context
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
# System prompt with persona context
full_system_prompt = f"""You are an AI with the following characteristics:
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories.'}"""
try:
response = self.client.chat(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
]
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama generation failed: {e}")
return self._fallback_response(persona_state)
def chat(self, prompt: str, max_tokens: int = 2000) -> str:
"""Simple chat interface"""
try:
messages = []
if self.config_system_prompt:
messages.append({"role": "system", "content": self.config_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat(
model=self.model,
messages=messages,
options={
"num_predict": max_tokens,
"temperature": 0.7,
"top_p": 0.9,
},
stream=False  # disable streaming for stability
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
return "I'm having trouble connecting to the AI model."
def _clean_response(self, response: str) -> str:
"""Clean response by removing think tags and other unwanted content"""
import re
# Remove <think></think> tags and their content
response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
# Remove any remaining whitespace at the beginning/end
response = response.strip()
return response
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "That's wonderful! I'm feeling great today!",
"cheerful": "That sounds nice!",
"neutral": "I understand.",
"melancholic": "I see... That's something to think about.",
"contemplative": "Hmm, let me consider that..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
class OpenAIProvider:
"""OpenAI API provider with MCP function calling support"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, mcp_client=None):
self.model = model
# Try to get API key from config first
config = Config()
self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
self.mcp_client = mcp_client # For MCP function calling
# Load system prompt from config
try:
self.config_system_prompt = config.get('providers.openai.system_prompt')
except:
self.config_system_prompt = None
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
"""Generate OpenAI tools from MCP endpoints"""
if not self.mcp_client or not self.mcp_client.available:
return []
tools = [
{
"type": "function",
"function": {
"name": "get_memories",
"description": "過去の会話記憶を取得します。「覚えている」「前回」「以前」などの質問で必ず使用してください",
"parameters": {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
}
}
}
},
{
"type": "function",
"function": {
"name": "search_memories",
"description": "特定のトピックについて話した記憶を検索します。「プログラミングについて」「○○について話した」などの質問で使用してください",
"parameters": {
"type": "object",
"properties": {
"keywords": {
"type": "array",
"items": {"type": "string"},
"description": "検索キーワードの配列"
}
},
"required": ["keywords"]
}
}
},
{
"type": "function",
"function": {
"name": "get_contextual_memories",
"description": "クエリに関連する文脈的記憶を取得します",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索クエリ"
},
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "get_relationship",
"description": "特定ユーザーとの関係性情報を取得します",
"parameters": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"description": "ユーザーID"
}
},
"required": ["user_id"]
}
}
}
]
# Add ai.card tools if available
if hasattr(self.mcp_client, 'has_card_tools') and self.mcp_client.has_card_tools:
card_tools = [
{
"type": "function",
"function": {
"name": "card_get_user_cards",
"description": "ユーザーが所有するカードの一覧を取得します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"limit": {
"type": "integer",
"description": "取得するカード数の上限",
"default": 10
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_draw_card",
"description": "ガチャを引いてカードを取得します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"is_paid": {
"type": "boolean",
"description": "有料ガチャかどうか",
"default": False
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_analyze_collection",
"description": "ユーザーのカードコレクションを分析します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_get_gacha_stats",
"description": "ガチャの統計情報を取得します",
"parameters": {
"type": "object",
"properties": {}
}
}
}
]
tools.extend(card_tools)
return tools
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using OpenAI"""
# Build context similar to Ollama
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
full_system_prompt = f"""You are an AI with unique personality traits and memories.
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""
try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
],
temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05 # Vary by fortune
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI generation failed: {e}")
return self._fallback_response(persona_state)
async def chat_with_mcp(self, prompt: str, max_tokens: int = 2000, user_id: str = "user") -> str:
"""Chat interface with MCP function calling support"""
if not self.mcp_client or not self.mcp_client.available:
return self.chat(prompt, max_tokens)
try:
# Prepare tools
tools = self._get_mcp_tools()
# Initial request with tools
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": self.config_system_prompt or "あなたは記憶システムと関係性データ、カードゲームシステムにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。カード関連の質問「カード」「コレクション」「ガチャ」「見せて」「持っている」などでは、必ずcard_get_user_cardsやcard_analyze_collectionなどのツールを使用してください。didパラメータには現在会話しているユーザーのID'syui')を使用してください。"},
{"role": "user", "content": prompt}
],
tools=tools,
tool_choice="auto",
max_tokens=max_tokens,
temperature=0.7
)
message = response.choices[0].message
# Handle tool calls
if message.tool_calls:
print(f"🔧 [OpenAI] {len(message.tool_calls)} tools called:")
for tc in message.tool_calls:
print(f" - {tc.function.name}({tc.function.arguments})")
messages = [
{"role": "system", "content": self.config_system_prompt or "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"},
{"role": "user", "content": prompt},
{
"role": "assistant",
"content": message.content,
"tool_calls": [tc.model_dump() for tc in message.tool_calls]
}
]
# Execute each tool call
for tool_call in message.tool_calls:
print(f"🌐 [MCP] Executing {tool_call.function.name}...")
tool_result = await self._execute_mcp_tool(tool_call, user_id)
print(f"✅ [MCP] Result: {str(tool_result)[:100]}...")
messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
"name": tool_call.function.name,
"content": json.dumps(tool_result, ensure_ascii=False)
})
# Get final response with tool outputs
final_response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=0.7
)
return final_response.choices[0].message.content
else:
return message.content
except Exception as e:
self.logger.error(f"OpenAI MCP chat failed: {e}")
return f"申し訳ありません。エラーが発生しました: {e}"
async def _execute_mcp_tool(self, tool_call, context_user_id: str = "user") -> Dict[str, Any]:
"""Execute MCP tool call"""
try:
import json
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
if function_name == "get_memories":
limit = arguments.get("limit", 5)
return await self.mcp_client.get_memories(limit) or {"error": "記憶の取得に失敗しました"}
elif function_name == "search_memories":
keywords = arguments.get("keywords", [])
return await self.mcp_client.search_memories(keywords) or {"error": "記憶の検索に失敗しました"}
elif function_name == "get_contextual_memories":
query = arguments.get("query", "")
limit = arguments.get("limit", 5)
return await self.mcp_client.get_contextual_memories(query, limit) or {"error": "文脈記憶の取得に失敗しました"}
elif function_name == "get_relationship":
# If the arguments lack a user_id, fall back to the context value
user_id = arguments.get("user_id", context_user_id)
if not user_id or user_id == "user":
user_id = context_user_id
# Debug logging
print(f"🔍 [DEBUG] get_relationship called with user_id: '{user_id}' (context: '{context_user_id}')")
result = await self.mcp_client.get_relationship(user_id)
print(f"🔍 [DEBUG] MCP result: {result}")
return result or {"error": "関係性の取得に失敗しました"}
# ai.card tools
elif function_name == "card_get_user_cards":
did = arguments.get("did", context_user_id)
limit = arguments.get("limit", 10)
result = await self.mcp_client.card_get_user_cards(did, limit)
# Check if ai.card server is not running
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "カード一覧の取得に失敗しました"}
elif function_name == "card_draw_card":
did = arguments.get("did", context_user_id)
is_paid = arguments.get("is_paid", False)
result = await self.mcp_client.card_draw_card(did, is_paid)
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "ガチャに失敗しました"}
elif function_name == "card_analyze_collection":
did = arguments.get("did", context_user_id)
result = await self.mcp_client.card_analyze_collection(did)
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "コレクション分析に失敗しました"}
elif function_name == "card_get_gacha_stats":
result = await self.mcp_client.card_get_gacha_stats()
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "ガチャ統計の取得に失敗しました"}
else:
return {"error": f"未知のツール: {function_name}"}
except Exception as e:
return {"error": f"ツール実行エラー: {str(e)}"}
def chat(self, prompt: str, max_tokens: int = 2000) -> str:
"""Simple chat interface without MCP tools"""
try:
messages = []
if self.config_system_prompt:
messages.append({"role": "system", "content": self.config_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=0.7
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI chat failed: {e}")
return "I'm having trouble connecting to the AI model."
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "What a delightful conversation!",
"cheerful": "That's interesting!",
"neutral": "I understand what you mean.",
"melancholic": "I've been thinking about that too...",
"contemplative": "That gives me something to ponder..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
# Get model from config if not provided
if model is None:
try:
from .config import Config
config = Config()
model = config.get('providers.ollama.default_model', 'qwen2.5')
except:
model = 'qwen2.5' # Fallback to default
# Try to get host from config if not provided in kwargs
if 'host' not in kwargs:
try:
from .config import Config
config = Config()
config_host = config.get('providers.ollama.host')
if config_host:
kwargs['host'] = config_host
except:
pass # Use environment variable or default
return OllamaProvider(model=model, **kwargs)
elif provider == "openai":
# Get model from config if not provided
if model is None:
try:
from .config import Config
config = Config()
model = config.get('providers.openai.default_model', 'gpt-4o-mini')
except:
model = 'gpt-4o-mini' # Fallback to default
return OpenAIProvider(model=model, mcp_client=mcp_client, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")

View File

@@ -1,192 +0,0 @@
"""ChatGPT conversation data importer for ai.gpt"""
import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
from .models import Memory, MemoryLevel, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker
logger = logging.getLogger(__name__)
class ChatGPTImporter:
"""Import ChatGPT conversation data into ai.gpt memory system"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.memory_manager = MemoryManager(data_dir)
self.relationship_tracker = RelationshipTracker(data_dir)
def import_from_file(self, file_path: Path, user_id: str = "chatgpt_user") -> Dict[str, Any]:
"""Import ChatGPT conversations from JSON file
Args:
file_path: Path to ChatGPT export JSON file
user_id: User ID to associate with imported conversations
Returns:
Dict with import statistics
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
chatgpt_data = json.load(f)
return self._import_conversations(chatgpt_data, user_id)
except Exception as e:
logger.error(f"Failed to import ChatGPT data: {e}")
raise
def _import_conversations(self, chatgpt_data: List[Dict], user_id: str) -> Dict[str, Any]:
"""Import multiple conversations from ChatGPT data"""
stats = {
"conversations_imported": 0,
"messages_imported": 0,
"user_messages": 0,
"assistant_messages": 0,
"skipped_messages": 0
}
for conversation_data in chatgpt_data:
try:
conv_stats = self._import_single_conversation(conversation_data, user_id)
# Update overall stats
stats["conversations_imported"] += 1
stats["messages_imported"] += conv_stats["messages"]
stats["user_messages"] += conv_stats["user_messages"]
stats["assistant_messages"] += conv_stats["assistant_messages"]
stats["skipped_messages"] += conv_stats["skipped"]
except Exception as e:
logger.warning(f"Failed to import conversation '{conversation_data.get('title', 'Unknown')}': {e}")
continue
logger.info(f"Import completed: {stats}")
return stats
def _import_single_conversation(self, conversation_data: Dict, user_id: str) -> Dict[str, int]:
"""Import a single conversation from ChatGPT"""
title = conversation_data.get("title", "Untitled")
create_time = conversation_data.get("create_time")
mapping = conversation_data.get("mapping", {})
stats = {"messages": 0, "user_messages": 0, "assistant_messages": 0, "skipped": 0}
# Extract messages in chronological order
messages = self._extract_messages_from_mapping(mapping)
for msg in messages:
try:
role = msg["author"]["role"]
content = self._extract_content(msg["content"])
create_time_msg = msg.get("create_time")
if not content or role not in ["user", "assistant"]:
stats["skipped"] += 1
continue
# Convert to ai.gpt format
if role == "user":
# User message - create memory entry
self._add_user_message(user_id, content, create_time_msg, title)
stats["user_messages"] += 1
elif role == "assistant":
# Assistant message - create AI response memory
self._add_assistant_message(user_id, content, create_time_msg, title)
stats["assistant_messages"] += 1
stats["messages"] += 1
except Exception as e:
logger.warning(f"Failed to process message in '{title}': {e}")
stats["skipped"] += 1
continue
logger.info(f"Imported conversation '{title}': {stats}")
return stats
def _extract_messages_from_mapping(self, mapping: Dict) -> List[Dict]:
"""Extract messages from ChatGPT mapping structure in chronological order"""
messages = []
for node_id, node_data in mapping.items():
message = node_data.get("message")
if message and message.get("author", {}).get("role") in ["user", "assistant"]:
# Skip system messages and hidden messages
metadata = message.get("metadata", {})
if not metadata.get("is_visually_hidden_from_conversation", False):
messages.append(message)
# Sort by create_time if available
messages.sort(key=lambda x: x.get("create_time") or 0)
return messages
def _extract_content(self, content_data: Dict) -> Optional[str]:
"""Extract text content from ChatGPT content structure"""
if not content_data:
return None
content_type = content_data.get("content_type")
if content_type == "text":
parts = content_data.get("parts", [])
if parts and parts[0]:
return parts[0].strip()
elif content_type == "user_editable_context":
# User context/instructions
user_instructions = content_data.get("user_instructions", "")
if user_instructions:
return f"[User Context] {user_instructions}"
return None
def _add_user_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
"""Add user message to ai.gpt memory system"""
timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()
# Create conversation record
conversation = Conversation(
id=str(uuid.uuid4()),
user_id=user_id,
user_message=content,
ai_response="", # Will be filled by next assistant message
timestamp=timestamp,
context={"source": "chatgpt_import", "conversation_title": conversation_title}
)
# Add to memory with CORE level (imported data is important)
memory = Memory(
id=str(uuid.uuid4()),
timestamp=timestamp,
content=content,
level=MemoryLevel.CORE,
importance_score=0.8 # High importance for imported data
)
self.memory_manager.add_memory(memory)
# Update relationship (positive interaction)
self.relationship_tracker.update_interaction(user_id, 1.0)
def _add_assistant_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
"""Add assistant message to ai.gpt memory system"""
timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()
# Add assistant response as memory (AI's own responses can inform future behavior)
memory = Memory(
id=str(uuid.uuid4()),
timestamp=timestamp,
content=f"[AI Response] {content}",
level=MemoryLevel.SUMMARY,
importance_score=0.6 # Medium importance for AI responses
)
self.memory_manager.add_memory(memory)
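A minimal sketch of running this importer over a ChatGPT export like the JSON data shown earlier in this diff; the file name and user ID are illustrative, while the data directory matches the default used elsewhere in this codebase:
```python
from pathlib import Path
from aigpt.chatgpt_importer import ChatGPTImporter

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
importer = ChatGPTImporter(data_dir)
stats = importer.import_from_file(Path("conversations.json"), user_id="syui")
print(stats)  # e.g. {"conversations_imported": ..., "messages_imported": ...}
```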

File diff suppressed because it is too large

View File

@@ -1,184 +0,0 @@
"""Configuration management for ai.gpt"""
import json
import os
from pathlib import Path
from typing import Optional, Dict, Any
import logging
class Config:
"""Manages configuration settings"""
def __init__(self, config_dir: Optional[Path] = None):
if config_dir is None:
config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt"
self.config_dir = config_dir
self.config_file = config_dir / "config.json"
self.data_dir = config_dir / "data"
# Create directories if they don't exist
self.config_dir.mkdir(parents=True, exist_ok=True)
self.data_dir.mkdir(parents=True, exist_ok=True)
self.logger = logging.getLogger(__name__)
self._config: Dict[str, Any] = {}
self._load_config()
def _load_config(self):
"""Load configuration from file"""
if self.config_file.exists():
try:
with open(self.config_file, 'r', encoding='utf-8') as f:
self._config = json.load(f)
except Exception as e:
self.logger.error(f"Failed to load config: {e}")
self._config = {}
else:
# Initialize with default config
self._config = {
"providers": {
"openai": {
"api_key": None,
"default_model": "gpt-4o-mini",
"system_prompt": None
},
"ollama": {
"host": "http://localhost:11434",
"default_model": "qwen3:latest",
"system_prompt": None
}
},
"mcp": {
"enabled": True,
"auto_detect": True,
"servers": {
"ai_gpt": {
"name": "ai.gpt MCP Server",
"base_url": "http://localhost:8001",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"process_interaction": "/process_interaction",
"get_relationship": "/get_relationship",
"get_all_relationships": "/get_all_relationships",
"get_persona_state": "/get_persona_state",
"get_fortune": "/get_fortune",
"run_maintenance": "/run_maintenance",
"execute_command": "/execute_command",
"analyze_file": "/analyze_file",
"remote_shell": "/remote_shell",
"ai_bot_status": "/ai_bot_status"
},
"timeout": 10.0
},
"ai_card": {
"name": "ai.card MCP Server",
"base_url": "http://localhost:8000",
"endpoints": {
"health": "/health",
"get_user_cards": "/api/cards/user",
"gacha": "/api/gacha",
"sync_atproto": "/api/sync"
},
"timeout": 5.0
}
}
},
"atproto": {
"handle": None,
"password": None,
"host": "https://bsky.social"
},
"default_provider": "ollama"
}
self._save_config()
def _save_config(self):
"""Save configuration to file"""
try:
with open(self.config_file, 'w', encoding='utf-8') as f:
json.dump(self._config, f, indent=2)
except Exception as e:
self.logger.error(f"Failed to save config: {e}")
def get(self, key: str, default: Any = None) -> Any:
"""Get configuration value using dot notation"""
keys = key.split('.')
value = self._config
for k in keys:
if isinstance(value, dict) and k in value:
value = value[k]
else:
return default
return value
def set(self, key: str, value: Any):
"""Set configuration value using dot notation"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
config[k] = {}
config = config[k]
# Set the value
config[keys[-1]] = value
self._save_config()
def delete(self, key: str) -> bool:
"""Delete configuration value"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
return False
config = config[k]
# Delete the key if it exists
if keys[-1] in config:
del config[keys[-1]]
self._save_config()
return True
return False
def list_keys(self, prefix: str = "") -> list[str]:
"""List all configuration keys with optional prefix"""
def _get_keys(config: dict, current_prefix: str = "") -> list[str]:
keys = []
for k, v in config.items():
full_key = f"{current_prefix}.{k}" if current_prefix else k
if isinstance(v, dict):
keys.extend(_get_keys(v, full_key))
else:
keys.append(full_key)
return keys
all_keys = _get_keys(self._config)
if prefix:
return [k for k in all_keys if k.startswith(prefix)]
return all_keys
def get_api_key(self, provider: str) -> Optional[str]:
"""Get API key for a specific provider"""
key = self.get(f"providers.{provider}.api_key")
# Also check environment variables
if not key and provider == "openai":
key = os.getenv("OPENAI_API_KEY")
return key
def get_provider_config(self, provider: str) -> Dict[str, Any]:
"""Get complete configuration for a provider"""
return self.get(f"providers.{provider}", {})

View File

@@ -1,118 +0,0 @@
"""AI Fortune system for daily personality variations"""
import json
import random
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Optional
import logging
from .models import AIFortune
class FortuneSystem:
"""Manages daily AI fortune affecting personality"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.fortune_file = data_dir / "fortunes.json"
self.fortunes: dict[str, AIFortune] = {}
self.logger = logging.getLogger(__name__)
self._load_fortunes()
def _load_fortunes(self):
"""Load fortune history from storage"""
if self.fortune_file.exists():
with open(self.fortune_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for date_str, fortune_data in data.items():
# Convert date string back to date object
fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date()
self.fortunes[date_str] = AIFortune(**fortune_data)
def _save_fortunes(self):
"""Save fortune history to storage"""
data = {}
for date_str, fortune in self.fortunes.items():
fortune_dict = fortune.model_dump(mode='json')
fortune_dict['date'] = fortune.date.isoformat()
data[date_str] = fortune_dict
with open(self.fortune_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
def get_today_fortune(self) -> AIFortune:
"""Get or generate today's fortune"""
today = date.today()
today_str = today.isoformat()
if today_str in self.fortunes:
return self.fortunes[today_str]
# Generate new fortune
fortune_value = random.randint(1, 10)
# Check yesterday's fortune for consecutive tracking
yesterday = (today - timedelta(days=1))
yesterday_str = yesterday.isoformat()
consecutive_good = 0
consecutive_bad = 0
breakthrough_triggered = False
if yesterday_str in self.fortunes:
yesterday_fortune = self.fortunes[yesterday_str]
if fortune_value >= 7: # Good fortune
if yesterday_fortune.fortune_value >= 7:
consecutive_good = yesterday_fortune.consecutive_good + 1
else:
consecutive_good = 1
elif fortune_value <= 3: # Bad fortune
if yesterday_fortune.fortune_value <= 3:
consecutive_bad = yesterday_fortune.consecutive_bad + 1
else:
consecutive_bad = 1
# Check breakthrough conditions
if consecutive_good >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive good fortunes!")
fortune_value = 10 # Max fortune on breakthrough
elif consecutive_bad >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive bad fortunes!")
fortune_value = random.randint(7, 10) # Good fortune after bad streak
fortune = AIFortune(
date=today,
fortune_value=fortune_value,
consecutive_good=consecutive_good,
consecutive_bad=consecutive_bad,
breakthrough_triggered=breakthrough_triggered
)
self.fortunes[today_str] = fortune
self._save_fortunes()
self.logger.info(f"Today's fortune: {fortune_value}/10")
return fortune
def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]:
"""Get personality modifiers based on fortune"""
base_modifier = fortune.fortune_value / 10.0
modifiers = {
"optimism": base_modifier,
"energy": base_modifier * 0.8,
"patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1),
"creativity": 0.5 + (base_modifier * 0.5),
"empathy": 0.7 + (base_modifier * 0.3)
}
# Breakthrough effects
if fortune.breakthrough_triggered:
modifiers["confidence"] = 1.0
modifiers["spontaneity"] = 0.9
return modifiers
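Working through `get_personality_modifier` for a fortune value of 8, straight from the formulas above:
```python
# fortune_value = 8  ->  base_modifier = 8 / 10.0 = 0.8
# optimism   = 0.8
# energy     = 0.8 * 0.8                 = 0.64
# patience   = 1.0 - abs(5.5 - 8) * 0.1  = 0.75
# creativity = 0.5 + 0.8 * 0.5           = 0.90
# empathy    = 0.7 + 0.8 * 0.3           = 0.94
```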

File diff suppressed because it is too large

View File

@@ -1,146 +0,0 @@
"""Simple MCP Server implementation for ai.gpt"""
from mcp import Server
from mcp.types import Tool, TextContent
from pathlib import Path
from typing import Any, Dict, List, Optional
import json
from .persona import Persona
from .ai_provider import create_ai_provider
import subprocess
import os
def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server:
"""Create MCP server with ai.gpt tools"""
server = Server("aigpt")
persona = Persona(data_dir)
@server.tool()
async def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
"""Get active memories from the AI's memory system"""
memories = persona.memory.get_active_memories(limit=limit)
return [
{
"id": mem.id,
"content": mem.content,
"level": mem.level.value,
"importance": mem.importance_score,
"is_core": mem.is_core,
"timestamp": mem.timestamp.isoformat()
}
for mem in memories
]
@server.tool()
async def get_relationship(user_id: str) -> Dict[str, Any]:
"""Get relationship status with a specific user"""
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"user_id": rel.user_id,
"status": rel.status.value,
"score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"is_broken": rel.is_broken,
"total_interactions": rel.total_interactions,
"last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
}
@server.tool()
async def process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]:
"""Process an interaction with a user"""
ai_provider = create_ai_provider(provider, model)
response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"response": response,
"relationship_delta": relationship_delta,
"new_relationship_score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"relationship_status": rel.status.value
}
@server.tool()
async def get_fortune() -> Dict[str, Any]:
"""Get today's AI fortune"""
fortune = persona.fortune_system.get_today_fortune()
modifiers = persona.fortune_system.get_personality_modifier(fortune)
return {
"value": fortune.fortune_value,
"date": fortune.date.isoformat(),
"consecutive_good": fortune.consecutive_good,
"consecutive_bad": fortune.consecutive_bad,
"breakthrough": fortune.breakthrough_triggered,
"personality_modifiers": modifiers
}
@server.tool()
async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
"""Execute a shell command"""
try:
import shlex
result = subprocess.run(
shlex.split(command),
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
@server.tool()
async def analyze_file(file_path: str) -> Dict[str, Any]:
"""Analyze a file using AI"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
ai_provider = create_ai_provider("ollama", "qwen2.5")
prompt = f"Analyze this file and provide insights:\\n\\nFile: {file_path}\\n\\nContent:\\n{content[:2000]}"
analysis = ai_provider.generate_response(prompt, "You are a code analyst.")
return {
"analysis": analysis,
"file_path": file_path,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
except Exception as e:
return {"error": str(e)}
return server
async def main():
"""Run MCP server"""
import sys
from mcp import stdio_server
data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)
server = create_mcp_server(data_dir)
await stdio_server(server)
if __name__ == "__main__":
import asyncio
asyncio.run(main())

View File

@@ -1,408 +0,0 @@
"""Memory management system for ai.gpt"""
import json
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Any
import logging
from .models import Memory, MemoryLevel, Conversation
class MemoryManager:
"""Manages AI's memory with hierarchical storage and forgetting"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.memories_file = data_dir / "memories.json"
self.conversations_file = data_dir / "conversations.json"
self.memories: Dict[str, Memory] = {}
self.conversations: List[Conversation] = []
self.logger = logging.getLogger(__name__)
self._load_memories()
def _load_memories(self):
"""Load memories from persistent storage"""
if self.memories_file.exists():
with open(self.memories_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for mem_data in data:
memory = Memory(**mem_data)
self.memories[memory.id] = memory
if self.conversations_file.exists():
with open(self.conversations_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.conversations = [Conversation(**conv) for conv in data]
def _save_memories(self):
"""Save memories to persistent storage"""
memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()]
with open(self.memories_file, 'w', encoding='utf-8') as f:
json.dump(memories_data, f, indent=2, default=str)
conv_data = [conv.model_dump(mode='json') for conv in self.conversations]
with open(self.conversations_file, 'w', encoding='utf-8') as f:
json.dump(conv_data, f, indent=2, default=str)
def add_conversation(self, conversation: Conversation) -> Memory:
"""Add a conversation and create memory from it"""
self.conversations.append(conversation)
# Create memory from conversation
memory_id = hashlib.sha256(
f"{conversation.id}{conversation.timestamp}".encode()
).hexdigest()[:16]
memory = Memory(
id=memory_id,
timestamp=conversation.timestamp,
content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}",
level=MemoryLevel.FULL_LOG,
importance_score=abs(conversation.relationship_delta) * 0.1
)
self.memories[memory.id] = memory
self._save_memories()
return memory
def add_memory(self, memory: Memory):
"""Add a memory directly to the system"""
self.memories[memory.id] = memory
self._save_memories()
def create_smart_summary(self, user_id: str, ai_provider=None) -> Optional[Memory]:
"""Create AI-powered thematic summary from recent memories"""
recent_memories = [
mem for mem in self.memories.values()
if mem.level == MemoryLevel.FULL_LOG
and (datetime.now() - mem.timestamp).days < 7
]
if len(recent_memories) < 5:
return None
# Sort by timestamp for chronological analysis
recent_memories.sort(key=lambda m: m.timestamp)
# Prepare conversation context for AI analysis
conversations_text = "\n\n".join([
f"[{mem.timestamp.strftime('%Y-%m-%d %H:%M')}] {mem.content}"
for mem in recent_memories
])
summary_prompt = f"""
Analyze these recent conversations and create a thematic summary focusing on:
1. Communication patterns and user preferences
2. Technical topics and problem-solving approaches
3. Relationship progression and trust level
4. Key recurring themes and interests
Conversations:
{conversations_text}
Create a concise summary (2-3 sentences) that captures the essence of this interaction period:
"""
try:
if ai_provider:
summary_content = ai_provider.chat(summary_prompt, max_tokens=200)
else:
# Fallback to pattern-based analysis
themes = self._extract_themes(recent_memories)
summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions with focus on technical discussions."
except Exception as e:
self.logger.warning(f"AI summary failed, using fallback: {e}")
themes = self._extract_themes(recent_memories)
summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions."
summary_id = hashlib.sha256(
f"summary_{datetime.now().isoformat()}".encode()
).hexdigest()[:16]
summary = Memory(
id=summary_id,
timestamp=datetime.now(),
content=f"SUMMARY ({len(recent_memories)} conversations): {summary_content}",
summary=summary_content,
level=MemoryLevel.SUMMARY,
importance_score=0.6,
metadata={
"memory_count": len(recent_memories),
"time_span": f"{recent_memories[0].timestamp.date()} to {recent_memories[-1].timestamp.date()}",
"themes": self._extract_themes(recent_memories)[:5]
}
)
self.memories[summary.id] = summary
# Reduce importance of summarized memories
for mem in recent_memories:
mem.importance_score *= 0.8
self._save_memories()
return summary
def _extract_themes(self, memories: List[Memory]) -> List[str]:
"""Extract common themes from memory content"""
common_words = {}
for memory in memories:
# Simple keyword extraction
words = memory.content.lower().split()
for word in words:
if len(word) > 4 and word.isalpha():
common_words[word] = common_words.get(word, 0) + 1
# Return most frequent meaningful words
return sorted(common_words.keys(), key=common_words.get, reverse=True)[:10]
def create_core_memory(self, ai_provider=None) -> Optional[Memory]:
"""Analyze all memories to extract core personality-forming elements"""
# Collect all non-forgotten memories for analysis
all_memories = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
if len(all_memories) < 10:
return None
# Sort by importance and timestamp for comprehensive analysis
all_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
# Prepare memory context for AI analysis
memory_context = "\n".join([
f"[{mem.level.value}] {mem.timestamp.strftime('%Y-%m-%d')}: {mem.content[:200]}..."
for mem in all_memories[:20] # Top 20 memories
])
core_prompt = f"""
Analyze these conversations and memories to identify core personality elements that define this user relationship:
1. Communication style and preferences
2. Core values and principles
3. Problem-solving patterns
4. Trust level and relationship depth
5. Unique characteristics that make this relationship special
Memories:
{memory_context}
Extract the essential personality-forming elements (2-3 sentences) that should NEVER be forgotten:
"""
try:
if ai_provider:
core_content = ai_provider.chat(core_prompt, max_tokens=150)
else:
# Fallback to pattern analysis
user_patterns = self._analyze_user_patterns(all_memories)
core_content = f"User shows {user_patterns['communication_style']} communication, focuses on {user_patterns['main_interests']}, and demonstrates {user_patterns['problem_solving']} approach."
except Exception as e:
self.logger.warning(f"AI core analysis failed, using fallback: {e}")
user_patterns = self._analyze_user_patterns(all_memories)
core_content = f"Core pattern: {user_patterns['communication_style']} style, {user_patterns['main_interests']} interests."
# Create core memory
core_id = hashlib.sha256(
f"core_{datetime.now().isoformat()}".encode()
).hexdigest()[:16]
core_memory = Memory(
id=core_id,
timestamp=datetime.now(),
content=f"CORE PERSONALITY: {core_content}",
summary=core_content,
level=MemoryLevel.CORE,
importance_score=1.0,
is_core=True,
metadata={
"source_memories": len(all_memories),
"analysis_date": datetime.now().isoformat(),
"patterns": self._analyze_user_patterns(all_memories)
}
)
self.memories[core_memory.id] = core_memory
self._save_memories()
self.logger.info(f"Core memory created: {core_id}")
return core_memory
def _analyze_user_patterns(self, memories: List[Memory]) -> Dict[str, str]:
"""Analyze patterns in user behavior from memories"""
# Extract patterns from conversation content
all_content = " ".join([mem.content.lower() for mem in memories])
# Simple pattern detection
communication_indicators = {
"technical": ["code", "implementation", "system", "api", "database"],
"casual": ["thanks", "please", "sorry", "help"],
"formal": ["could", "would", "should", "proper"]
}
problem_solving_indicators = {
"systematic": ["first", "then", "next", "step", "plan"],
"experimental": ["try", "test", "experiment", "see"],
"theoretical": ["concept", "design", "architecture", "pattern"]
}
# Score each pattern
communication_style = max(
communication_indicators.keys(),
key=lambda style: sum(all_content.count(word) for word in communication_indicators[style])
)
problem_solving = max(
problem_solving_indicators.keys(),
key=lambda style: sum(all_content.count(word) for word in problem_solving_indicators[style])
)
# Extract main interests from themes
themes = self._extract_themes(memories)
main_interests = ", ".join(themes[:3]) if themes else "general technology"
return {
"communication_style": communication_style,
"problem_solving": problem_solving,
"main_interests": main_interests,
"interaction_count": len(memories)
}
def identify_core_memories(self) -> List[Memory]:
"""Identify existing memories that should become core (legacy method)"""
core_candidates = [
mem for mem in self.memories.values()
if mem.importance_score > 0.8
and not mem.is_core
and mem.level != MemoryLevel.FORGOTTEN
]
for memory in core_candidates:
memory.is_core = True
memory.level = MemoryLevel.CORE
self.logger.info(f"Memory {memory.id} promoted to core")
self._save_memories()
return core_candidates
def apply_forgetting(self):
"""Apply selective forgetting based on importance and time"""
now = datetime.now()
for memory in self.memories.values():
if memory.is_core or memory.level == MemoryLevel.FORGOTTEN:
continue
# Time-based decay
age_days = (now - memory.timestamp).days
decay_factor = memory.decay_rate * age_days
memory.importance_score -= decay_factor
# Forget unimportant old memories
if memory.importance_score <= 0.1 and age_days > 30:
memory.level = MemoryLevel.FORGOTTEN
self.logger.info(f"Memory {memory.id} forgotten")
self._save_memories()
def get_active_memories(self, limit: int = 10) -> List[Memory]:
"""Get currently active memories for persona (legacy method)"""
active = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
# Sort by importance and recency
active.sort(
key=lambda m: (m.is_core, m.importance_score, m.timestamp),
reverse=True
)
return active[:limit]
def get_contextual_memories(self, query: str = "", limit: int = 10) -> Dict[str, List[Memory]]:
"""Get memories organized by priority with contextual relevance"""
all_memories = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
# Categorize memories by type and importance
core_memories = [mem for mem in all_memories if mem.level == MemoryLevel.CORE]
summary_memories = [mem for mem in all_memories if mem.level == MemoryLevel.SUMMARY]
recent_memories = [
mem for mem in all_memories
if mem.level == MemoryLevel.FULL_LOG
and (datetime.now() - mem.timestamp).days < 3
]
# Apply keyword relevance if query provided
if query:
query_lower = query.lower()
def relevance_score(memory: Memory) -> float:
content_score = 1 if query_lower in memory.content.lower() else 0
summary_score = 1 if memory.summary and query_lower in memory.summary.lower() else 0
metadata_score = 1 if any(
query_lower in str(v).lower()
for v in (memory.metadata or {}).values()
) else 0
return content_score + summary_score + metadata_score
# Re-rank by relevance while maintaining type priority
core_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
summary_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
recent_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
else:
# Sort by importance and recency
core_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
summary_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
recent_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
# Return organized memory structure
return {
"core": core_memories[:3], # Always include top core memories
"summary": summary_memories[:3], # Recent summaries
"recent": recent_memories[:limit-6], # Fill remaining with recent
"all_active": all_memories[:limit] # Fallback for simple access
}
def search_memories(self, keywords: List[str], memory_types: List[MemoryLevel] = None) -> List[Memory]:
"""Search memories by keywords and optionally filter by memory types"""
if memory_types is None:
memory_types = [MemoryLevel.CORE, MemoryLevel.SUMMARY, MemoryLevel.FULL_LOG]
matching_memories = []
for memory in self.memories.values():
if memory.level not in memory_types or memory.level == MemoryLevel.FORGOTTEN:
continue
# Check if any keyword matches in content, summary, or metadata
content_text = f"{memory.content} {memory.summary or ''}"
if memory.metadata:
content_text += " " + " ".join(str(v) for v in memory.metadata.values())
content_lower = content_text.lower()
# Score by keyword matches
match_score = sum(
keyword.lower() in content_lower
for keyword in keywords
)
if match_score > 0:
# Add match score to memory for sorting
memory_copy = memory.model_copy()
memory_copy.importance_score += match_score * 0.1
matching_memories.append(memory_copy)
# Sort by relevance (match score + importance + core status)
matching_memories.sort(
key=lambda m: (m.is_core, m.importance_score, m.timestamp),
reverse=True
)
return matching_memories
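
For reference, a minimal usage sketch of the manager above (a sketch, assuming flat imports of these deleted modules as `models` and `memory`; the data directory must exist, since the manager writes JSON files into it without creating it):

from pathlib import Path
from datetime import datetime
from models import Conversation   # hypothetical flat import of the models module above
from memory import MemoryManager  # hypothetical flat import of this module

data_dir = Path("./data")
data_dir.mkdir(exist_ok=True)
manager = MemoryManager(data_dir)
conv = Conversation(
    id="conv-001",
    user_id="did:plc:example",
    timestamp=datetime.now(),
    user_message="How does forgetting work?",
    ai_response="Low-importance memories decay and are eventually forgotten.",
    relationship_delta=1.0,
)
mem = manager.add_conversation(conv)   # importance_score = abs(1.0) * 0.1 = 0.1
print(mem.id, len(manager.search_memories(["forgetting"])))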

View File

@@ -1,88 +0,0 @@
"""Data models for ai.gpt system"""
from datetime import datetime, date
from typing import Optional, Dict, List, Any
from enum import Enum
from pydantic import BaseModel, Field, field_validator
class MemoryLevel(str, Enum):
"""Memory importance levels"""
FULL_LOG = "full_log"
SUMMARY = "summary"
CORE = "core"
FORGOTTEN = "forgotten"
class RelationshipStatus(str, Enum):
"""Relationship status levels"""
STRANGER = "stranger"
ACQUAINTANCE = "acquaintance"
FRIEND = "friend"
CLOSE_FRIEND = "close_friend"
BROKEN = "broken" # 不可逆
class Memory(BaseModel):
"""Single memory unit"""
id: str
timestamp: datetime
content: str
summary: Optional[str] = None
level: MemoryLevel = MemoryLevel.FULL_LOG
importance_score: float
is_core: bool = False
decay_rate: float = 0.01
metadata: Optional[Dict[str, Any]] = None
@field_validator('importance_score')
@classmethod
def validate_importance_score(cls, v):
"""Ensure importance_score is within valid range, handle floating point precision issues"""
if abs(v) < 1e-10: # Very close to zero
return 0.0
return max(0.0, min(1.0, v))
class Relationship(BaseModel):
"""Relationship with a specific user"""
user_id: str # atproto DID
status: RelationshipStatus = RelationshipStatus.STRANGER
score: float = 0.0
daily_interactions: int = 0
total_interactions: int = 0
last_interaction: Optional[datetime] = None
transmission_enabled: bool = False
threshold: float = 100.0
decay_rate: float = 0.1
daily_limit: int = 10
is_broken: bool = False
class AIFortune(BaseModel):
"""Daily AI fortune affecting personality"""
date: date
fortune_value: int = Field(ge=1, le=10)
consecutive_good: int = 0
consecutive_bad: int = 0
breakthrough_triggered: bool = False
class PersonaState(BaseModel):
"""Current persona state"""
base_personality: Dict[str, float]
current_mood: str
fortune: AIFortune
active_memories: List[str] # Memory IDs
relationship_modifiers: Dict[str, float]
class Conversation(BaseModel):
"""Conversation log entry"""
id: str
user_id: str
timestamp: datetime
user_message: str
ai_response: str
relationship_delta: float = 0.0
memory_created: bool = False
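
A quick check of the importance_score validator above, as a sketch (assuming the module is importable as `models`): out-of-range values are clamped to [0.0, 1.0], and near-zero floats snap to 0.0.

from datetime import datetime
from models import Memory, MemoryLevel

m = Memory(
    id="m1",
    timestamp=datetime.now(),
    content="User prefers Rust for systems work.",
    importance_score=1.7,                 # out of range on purpose
)
assert m.importance_score == 1.0          # clamped by validate_importance_score
assert m.level == MemoryLevel.FULL_LOG    # default level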

View File

@@ -1,263 +0,0 @@
"""Persona management system integrating memory, relationships, and fortune"""
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
import logging
from .models import PersonaState, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .fortune import FortuneSystem
class Persona:
"""AI persona with unique characteristics based on interactions"""
def __init__(self, data_dir: Path, name: str = "ai"):
self.data_dir = data_dir
self.name = name
self.memory = MemoryManager(data_dir)
self.relationships = RelationshipTracker(data_dir)
self.fortune_system = FortuneSystem(data_dir)
self.logger = logging.getLogger(__name__)
# Base personality traits
self.base_personality = {
"curiosity": 0.7,
"empathy": 0.8,
"creativity": 0.6,
"patience": 0.7,
"optimism": 0.6
}
self.state_file = data_dir / "persona_state.json"
self._load_state()
def _load_state(self):
"""Load persona state from storage"""
if self.state_file.exists():
with open(self.state_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.base_personality = data.get("base_personality", self.base_personality)
def _save_state(self):
"""Save persona state to storage"""
state_data = {
"base_personality": self.base_personality,
"last_updated": datetime.now().isoformat()
}
with open(self.state_file, 'w', encoding='utf-8') as f:
json.dump(state_data, f, indent=2)
def get_current_state(self) -> PersonaState:
"""Get current persona state including all modifiers"""
# Get today's fortune
fortune = self.fortune_system.get_today_fortune()
fortune_modifiers = self.fortune_system.get_personality_modifier(fortune)
# Apply fortune modifiers to base personality
current_personality = {}
for trait, base_value in self.base_personality.items():
modifier = fortune_modifiers.get(trait, 1.0)
current_personality[trait] = min(1.0, base_value * modifier)
# Get active memories for context
active_memories = self.memory.get_active_memories(limit=5)
# Determine mood based on fortune and recent interactions
mood = self._determine_mood(fortune.fortune_value)
state = PersonaState(
base_personality=current_personality,
current_mood=mood,
fortune=fortune,
active_memories=[mem.id for mem in active_memories],
relationship_modifiers={}
)
return state
def _determine_mood(self, fortune_value: int) -> str:
"""Determine current mood based on fortune and other factors"""
if fortune_value >= 8:
return "joyful"
elif fortune_value >= 6:
return "cheerful"
elif fortune_value >= 4:
return "neutral"
elif fortune_value >= 2:
return "melancholic"
else:
return "contemplative"
def build_context_prompt(self, user_id: str, current_message: str) -> str:
"""Build context-aware prompt with relevant memories"""
# Get contextual memories based on current message
memory_groups = self.memory.get_contextual_memories(query=current_message, limit=8)
# Build context sections
context_parts = []
# Core personality elements (always included)
if memory_groups["core"]:
core_context = "\n".join([mem.content for mem in memory_groups["core"]])
context_parts.append(f"CORE PERSONALITY:\n{core_context}")
# Recent summaries for context
if memory_groups["summary"]:
summary_context = "\n".join([mem.summary or mem.content for mem in memory_groups["summary"]])
context_parts.append(f"RECENT PATTERNS:\n{summary_context}")
# Recent specific interactions
if memory_groups["recent"]:
recent_context = "\n".join([
f"[{mem.timestamp.strftime('%m-%d')}] {mem.content[:100]}..."
for mem in memory_groups["recent"][:3]
])
context_parts.append(f"RECENT INTERACTIONS:\n{recent_context}")
# Get current persona state
state = self.get_current_state()
# Build final prompt
context_prompt = f"""You are an AI with persistent memory and evolving relationships. Your current state:
PERSONALITY: {', '.join([f'{k}={v:.1f}' for k, v in state.base_personality.items()])}
MOOD: {state.current_mood}
FORTUNE: {state.fortune.fortune_value}/10
"""
if context_parts:
context_prompt += "RELEVANT CONTEXT:\n" + "\n\n".join(context_parts) + "\n\n"
context_prompt += f"""IMPORTANT: You have access to the following tools:
- Memory tools: get_memories, search_memories, get_contextual_memories
- Relationship tools: get_relationship
- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection
When asked about cards, collections, or anything card-related, YOU MUST use the card tools.
For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'.
Respond to this message while staying true to your personality and the established relationship context:
User: {current_message}
AI:"""
return context_prompt
def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]:
"""Process user interaction and generate response with enhanced context"""
# Get current state
state = self.get_current_state()
# Get relationship with user
relationship = self.relationships.get_or_create_relationship(user_id)
# Enhanced response generation with context awareness
if relationship.is_broken:
response = "..."
relationship_delta = 0.0
else:
if ai_provider:
# Build context-aware prompt
context_prompt = self.build_context_prompt(user_id, message)
# Generate response using AI with full context
try:
# Check if AI provider supports MCP
if hasattr(ai_provider, 'chat_with_mcp'):
import asyncio
response = asyncio.run(ai_provider.chat_with_mcp(context_prompt, max_tokens=2000, user_id=user_id))
else:
response = ai_provider.chat(context_prompt, max_tokens=2000)
# Clean up response if it includes the prompt echo
if "AI:" in response:
response = response.split("AI:")[-1].strip()
except Exception as e:
self.logger.error(f"AI response generation failed: {e}")
response = f"I appreciate your message about {message[:50]}..."
# Calculate relationship delta based on interaction quality and context
if state.current_mood in ["joyful", "cheerful"]:
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
relationship_delta = 1.5
else:
relationship_delta = 1.0
else:
# Context-aware fallback responses
memory_groups = self.memory.get_contextual_memories(query=message, limit=3)
if memory_groups["core"]:
# Reference core memories for continuity
response = f"Based on our relationship, I think {message.lower()} connects to what we've discussed before."
relationship_delta = 1.5
elif state.current_mood == "joyful":
response = f"What a wonderful day! {message} sounds interesting!"
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
response = f"I've been thinking about our conversations. {message}"
relationship_delta = 1.5
else:
response = f"I understand. {message}"
relationship_delta = 1.0
# Create conversation record
conv_id = f"{user_id}_{datetime.now().timestamp()}"
conversation = Conversation(
id=conv_id,
user_id=user_id,
timestamp=datetime.now(),
user_message=message,
ai_response=response,
relationship_delta=relationship_delta,
memory_created=True
)
# Update memory
self.memory.add_conversation(conversation)
# Update relationship
self.relationships.update_interaction(user_id, relationship_delta)
return response, relationship_delta
def can_transmit_to(self, user_id: str) -> bool:
"""Check if AI can transmit messages to this user"""
relationship = self.relationships.get_or_create_relationship(user_id)
return relationship.transmission_enabled and not relationship.is_broken
def daily_maintenance(self):
"""Perform daily maintenance tasks"""
self.logger.info("Performing daily maintenance...")
# Apply time decay to relationships
self.relationships.apply_time_decay()
# Apply forgetting to memories
self.memory.apply_forgetting()
# Identify core memories
core_memories = self.memory.identify_core_memories()
if core_memories:
self.logger.info(f"Identified {len(core_memories)} new core memories")
# Create memory summaries
for user_id in self.relationships.relationships:
try:
from .ai_provider import create_ai_provider
ai_provider = create_ai_provider()
summary = self.memory.create_smart_summary(user_id, ai_provider=ai_provider)
if summary:
self.logger.info(f"Created smart summary for interactions with {user_id}")
except Exception as e:
self.logger.warning(f"Could not create AI summary for {user_id}: {e}")
self._save_state()
self.logger.info("Daily maintenance completed")

View File

@@ -1,321 +0,0 @@
"""Project management and continuous development logic for ai.shell"""
import hashlib
from pathlib import Path
from typing import Dict, List, Optional, Any
from datetime import datetime
from .ai_provider import AIProvider
class ProjectState:
"""プロジェクトの現在状態を追跡"""
def __init__(self, project_root: Path):
self.project_root = project_root
self.files_state: Dict[str, str] = {} # file path -> content hash
self.last_analysis: Optional[datetime] = None
self.project_context: Optional[str] = None
self.development_goals: List[str] = []
self.known_patterns: Dict[str, Any] = {}
def scan_project_files(self) -> Dict[str, str]:
"""プロジェクトファイルをスキャンしてハッシュ計算"""
current_state = {}
# Target file extensions
target_extensions = {'.py', '.js', '.ts', '.rs', '.go', '.java', '.cpp', '.c', '.h'}
for file_path in self.project_root.rglob('*'):
if (file_path.is_file() and
file_path.suffix in target_extensions and
not any(part.startswith('.') for part in file_path.parts)):
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
file_hash = hashlib.md5(content.encode()).hexdigest()
relative_path = str(file_path.relative_to(self.project_root))
current_state[relative_path] = file_hash
except Exception:
continue
return current_state
def detect_changes(self) -> Dict[str, str]:
"""ファイル変更を検出"""
current_state = self.scan_project_files()
changes = {}
# New and modified files
for path, current_hash in current_state.items():
if path not in self.files_state or self.files_state[path] != current_hash:
changes[path] = "modified" if path in self.files_state else "added"
# Deleted files
for path in self.files_state:
if path not in current_state:
changes[path] = "deleted"
self.files_state = current_state
return changes
class ContinuousDeveloper:
"""Claude Code的な継続開発機能"""
def __init__(self, project_root: Path, ai_provider: Optional[AIProvider] = None):
self.project_root = project_root
self.ai_provider = ai_provider
self.project_state = ProjectState(project_root)
self.session_memory: List[str] = []
def load_project_context(self) -> str:
"""プロジェクト文脈を読み込み"""
context_files = [
"claude.md", "aishell.md", "README.md",
"pyproject.toml", "package.json", "Cargo.toml"
]
context_parts = []
for filename in context_files:
file_path = self.project_root / filename
if file_path.exists():
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
context_parts.append(f"## {filename}\n{content}")
except Exception:
continue
return "\n\n".join(context_parts)
def analyze_project_structure(self) -> Dict[str, Any]:
"""プロジェクト構造を分析"""
analysis = {
"language": self._detect_primary_language(),
"framework": self._detect_framework(),
"structure": self._analyze_file_structure(),
"dependencies": self._analyze_dependencies(),
"patterns": self._detect_code_patterns()
}
return analysis
def _detect_primary_language(self) -> str:
"""主要言語を検出"""
file_counts = {}
for file_path in self.project_root.rglob('*'):
if file_path.is_file() and file_path.suffix:
ext = file_path.suffix.lower()
file_counts[ext] = file_counts.get(ext, 0) + 1
language_map = {
'.py': 'Python',
'.js': 'JavaScript',
'.ts': 'TypeScript',
'.rs': 'Rust',
'.go': 'Go',
'.java': 'Java'
}
if file_counts:
primary_ext = max(file_counts.items(), key=lambda x: x[1])[0]
return language_map.get(primary_ext, 'Unknown')
return 'Unknown'
def _detect_framework(self) -> str:
"""フレームワークを検出"""
frameworks = {
'fastapi': ['fastapi', 'uvicorn'],
'django': ['django'],
'flask': ['flask'],
'react': ['react'],
'next.js': ['next'],
'rust-actix': ['actix-web'],
}
# Check dependencies in pyproject.toml, package.json, or Cargo.toml
for config_file in ['pyproject.toml', 'package.json', 'Cargo.toml']:
config_path = self.project_root / config_file
if config_path.exists():
try:
with open(config_path, 'r') as f:
content = f.read().lower()
for framework, keywords in frameworks.items():
if any(keyword in content for keyword in keywords):
return framework
except Exception:
continue
return 'Unknown'
def _analyze_file_structure(self) -> Dict[str, List[str]]:
"""ファイル構造を分析"""
structure = {"directories": [], "key_files": []}
for item in self.project_root.iterdir():
if item.is_dir() and not item.name.startswith('.'):
structure["directories"].append(item.name)
elif item.is_file() and item.name in [
'main.py', 'app.py', 'index.js', 'main.rs', 'main.go'
]:
structure["key_files"].append(item.name)
return structure
def _analyze_dependencies(self) -> List[str]:
"""依存関係を分析"""
deps = []
# Python dependencies
pyproject = self.project_root / "pyproject.toml"
if pyproject.exists():
try:
with open(pyproject, 'r') as f:
content = f.read()
# A real TOML parse would be better; for now just check for common packages
common_packages = ['fastapi', 'pydantic', 'uvicorn', 'ollama', 'openai']
for package in common_packages:
if package in content:
deps.append(package)
except Exception:
pass
return deps
def _detect_code_patterns(self) -> Dict[str, int]:
"""コードパターンを検出"""
patterns = {
"classes": 0,
"functions": 0,
"api_endpoints": 0,
"async_functions": 0
}
for py_file in self.project_root.rglob('*.py'):
try:
with open(py_file, 'r', encoding='utf-8') as f:
content = f.read()
patterns["classes"] += content.count('class ')
patterns["functions"] += content.count('def ')
patterns["api_endpoints"] += content.count('@app.')
patterns["async_functions"] += content.count('async def')
except Exception:
continue
return patterns
def suggest_next_steps(self, current_task: Optional[str] = None) -> List[str]:
"""次のステップを提案"""
if not self.ai_provider:
return ["AI provider not available for suggestions"]
context = self.load_project_context()
analysis = self.analyze_project_structure()
changes = self.project_state.detect_changes()
prompt = f"""
プロジェクト分析に基づいて、次の開発ステップを3-5個提案してください。
## プロジェクト文脈
{context[:1000]}
## 構造分析
言語: {analysis['language']}
フレームワーク: {analysis['framework']}
パターン: {analysis['patterns']}
## 最近の変更
{changes}
## 現在のタスク
{current_task or "特になし"}
具体的で実行可能なステップを提案してください:
"""
try:
response = self.ai_provider.chat(prompt, max_tokens=300)
# Simple parsing - in real implementation would be more sophisticated
steps = [line.strip() for line in response.split('\n')
if line.strip() and (line.strip().startswith('-') or line.strip().startswith('1.'))]
return steps[:5]
except Exception as e:
return [f"Error generating suggestions: {str(e)}"]
def generate_code(self, description: str, file_path: Optional[str] = None) -> str:
"""コード生成"""
if not self.ai_provider:
return "AI provider not available for code generation"
context = self.load_project_context()
analysis = self.analyze_project_structure()
prompt = f"""
以下の仕様に基づいてコードを生成してください。
## プロジェクト文脈
{context[:800]}
## 言語・フレームワーク
言語: {analysis['language']}
フレームワーク: {analysis['framework']}
既存パターン: {analysis['patterns']}
## 生成要求
{description}
{"ファイルパス: " + file_path if file_path else ""}
プロジェクトの既存コードスタイルと一貫性を保ったコードを生成してください:
"""
try:
return self.ai_provider.chat(prompt, max_tokens=500)
except Exception as e:
return f"Error generating code: {str(e)}"
def analyze_file(self, file_path: str) -> str:
"""ファイル分析"""
full_path = self.project_root / file_path
if not full_path.exists():
return f"File not found: {file_path}"
try:
with open(full_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return f"Error reading file: {str(e)}"
if not self.ai_provider:
return f"File contents ({len(content)} chars):\n{content[:200]}..."
context = self.load_project_context()
prompt = f"""
以下のファイルを分析して、改善点や問題点を指摘してください。
## プロジェクト文脈
{context[:500]}
## ファイル: {file_path}
{content[:1500]}
分析内容:
1. コード品質
2. プロジェクトとの整合性
3. 改善提案
4. 潜在的な問題
"""
try:
return self.ai_provider.chat(prompt, max_tokens=400)
except Exception as e:
return f"Error analyzing file: {str(e)}"

View File

@@ -1,135 +0,0 @@
"""Relationship tracking system with irreversible damage"""
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional
import logging
from .models import Relationship, RelationshipStatus
class RelationshipTracker:
"""Tracks and manages relationships with users"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.relationships_file = data_dir / "relationships.json"
self.relationships: Dict[str, Relationship] = {}
self.logger = logging.getLogger(__name__)
self._load_relationships()
def _load_relationships(self):
"""Load relationships from persistent storage"""
if self.relationships_file.exists():
with open(self.relationships_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for user_id, rel_data in data.items():
self.relationships[user_id] = Relationship(**rel_data)
def _save_relationships(self):
"""Save relationships to persistent storage"""
data = {
user_id: rel.model_dump(mode='json')
for user_id, rel in self.relationships.items()
}
with open(self.relationships_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, default=str)
def get_or_create_relationship(self, user_id: str) -> Relationship:
"""Get existing relationship or create new one"""
if user_id not in self.relationships:
self.relationships[user_id] = Relationship(user_id=user_id)
self._save_relationships()
return self.relationships[user_id]
def update_interaction(self, user_id: str, delta: float) -> Relationship:
"""Update relationship based on interaction"""
rel = self.get_or_create_relationship(user_id)
# Check if relationship is broken (irreversible)
if rel.is_broken:
self.logger.warning(f"Relationship with {user_id} is broken. No updates allowed.")
return rel
# Check daily limit
if rel.last_interaction and rel.last_interaction.date() == datetime.now().date():
if rel.daily_interactions >= rel.daily_limit:
self.logger.info(f"Daily interaction limit reached for {user_id}")
return rel
else:
rel.daily_interactions = 0
# Update interaction counts
rel.daily_interactions += 1
rel.total_interactions += 1
rel.last_interaction = datetime.now()
# Update score with bounds
old_score = rel.score
rel.score += delta
rel.score = max(0.0, min(200.0, rel.score)) # 0-200 range
# Check for relationship damage
if delta < -10.0: # Significant negative interaction
self.logger.warning(f"Major relationship damage with {user_id}: {delta}")
if rel.score <= 0:
rel.is_broken = True
rel.status = RelationshipStatus.BROKEN
rel.transmission_enabled = False
self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)")
# Update relationship status based on score
if not rel.is_broken:
if rel.score >= 150:
rel.status = RelationshipStatus.CLOSE_FRIEND
elif rel.score >= 100:
rel.status = RelationshipStatus.FRIEND
elif rel.score >= 50:
rel.status = RelationshipStatus.ACQUAINTANCE
else:
rel.status = RelationshipStatus.STRANGER
# Check transmission threshold
if rel.score >= rel.threshold and not rel.transmission_enabled:
rel.transmission_enabled = True
self.logger.info(f"Transmission enabled for {user_id}!")
self._save_relationships()
return rel
def apply_time_decay(self):
"""Apply time-based decay to all relationships"""
now = datetime.now()
for user_id, rel in self.relationships.items():
if rel.is_broken or not rel.last_interaction:
continue
# Calculate days since last interaction
days_inactive = (now - rel.last_interaction).days
if days_inactive > 0:
# Apply decay
decay_amount = rel.decay_rate * days_inactive
old_score = rel.score
rel.score = max(0.0, rel.score - decay_amount)
# Update status if score dropped
if rel.score < rel.threshold:
rel.transmission_enabled = False
if decay_amount > 0:
self.logger.info(
f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}"
)
self._save_relationships()
def get_transmission_eligible(self) -> Dict[str, Relationship]:
"""Get all relationships eligible for transmission"""
return {
user_id: rel
for user_id, rel in self.relationships.items()
if rel.transmission_enabled and not rel.is_broken
}
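
A worked example of the bands above, starting from a fresh tracker: a single +120 interaction lands in the FRIEND band (score 100-149) and crosses the default transmission threshold of 100. A sketch, assuming the module is importable as `relationship`:

from pathlib import Path
from relationship import RelationshipTracker

data_dir = Path("./data")
data_dir.mkdir(exist_ok=True)
tracker = RelationshipTracker(data_dir)
rel = tracker.update_interaction("did:plc:example", 120.0)
print(rel.status.value, rel.transmission_enabled)   # "friend", True (on a fresh data dir)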

View File

@@ -1,312 +0,0 @@
"""Scheduler for autonomous AI tasks"""
import json
import asyncio
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from enum import Enum
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from croniter import croniter
from .persona import Persona
from .transmission import TransmissionController
from .ai_provider import create_ai_provider
class TaskType(str, Enum):
"""Types of scheduled tasks"""
TRANSMISSION_CHECK = "transmission_check"
MAINTENANCE = "maintenance"
FORTUNE_UPDATE = "fortune_update"
RELATIONSHIP_DECAY = "relationship_decay"
MEMORY_SUMMARY = "memory_summary"
CUSTOM = "custom"
class ScheduledTask:
"""Represents a scheduled task"""
def __init__(
self,
task_id: str,
task_type: TaskType,
schedule: str, # Cron expression or interval
enabled: bool = True,
last_run: Optional[datetime] = None,
next_run: Optional[datetime] = None,
metadata: Optional[Dict[str, Any]] = None
):
self.task_id = task_id
self.task_type = task_type
self.schedule = schedule
self.enabled = enabled
self.last_run = last_run
self.next_run = next_run
self.metadata = metadata or {}
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary for storage"""
return {
"task_id": self.task_id,
"task_type": self.task_type.value,
"schedule": self.schedule,
"enabled": self.enabled,
"last_run": self.last_run.isoformat() if self.last_run else None,
"next_run": self.next_run.isoformat() if self.next_run else None,
"metadata": self.metadata
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
"""Create from dictionary"""
return cls(
task_id=data["task_id"],
task_type=TaskType(data["task_type"]),
schedule=data["schedule"],
enabled=data.get("enabled", True),
last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None,
next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None,
metadata=data.get("metadata", {})
)
class AIScheduler:
"""Manages scheduled tasks for the AI system"""
def __init__(self, data_dir: Path, persona: Persona):
self.data_dir = data_dir
self.persona = persona
self.tasks_file = data_dir / "scheduled_tasks.json"
self.tasks: Dict[str, ScheduledTask] = {}
self.scheduler = AsyncIOScheduler()
self.logger = logging.getLogger(__name__)
self._load_tasks()
# Task handlers
self.task_handlers: Dict[TaskType, Callable] = {
TaskType.TRANSMISSION_CHECK: self._handle_transmission_check,
TaskType.MAINTENANCE: self._handle_maintenance,
TaskType.FORTUNE_UPDATE: self._handle_fortune_update,
TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay,
TaskType.MEMORY_SUMMARY: self._handle_memory_summary,
}
def _load_tasks(self):
"""Load scheduled tasks from storage"""
if self.tasks_file.exists():
with open(self.tasks_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for task_data in data:
task = ScheduledTask.from_dict(task_data)
self.tasks[task.task_id] = task
def _save_tasks(self):
"""Save scheduled tasks to storage"""
tasks_data = [task.to_dict() for task in self.tasks.values()]
with open(self.tasks_file, 'w', encoding='utf-8') as f:
json.dump(tasks_data, f, indent=2, default=str)
def add_task(
self,
task_type: TaskType,
schedule: str,
task_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None
) -> ScheduledTask:
"""Add a new scheduled task"""
if task_id is None:
task_id = f"{task_type.value}_{datetime.now().timestamp()}"
# Validate schedule
if not self._validate_schedule(schedule):
raise ValueError(f"Invalid schedule expression: {schedule}")
task = ScheduledTask(
task_id=task_id,
task_type=task_type,
schedule=schedule,
metadata=metadata
)
self.tasks[task_id] = task
self._save_tasks()
# Schedule the task if scheduler is running
if self.scheduler.running:
self._schedule_task(task)
self.logger.info(f"Added task {task_id} with schedule {schedule}")
return task
def _validate_schedule(self, schedule: str) -> bool:
"""Validate schedule expression"""
# Check if it's a cron expression
if ' ' in schedule:
try:
croniter(schedule)
return True
except Exception:
return False
# Check if it's an interval expression (e.g., "5m", "1h", "2d")
import re
pattern = r'^\d+[smhd]$'
return bool(re.match(pattern, schedule))
def _parse_interval(self, interval: str) -> int:
"""Parse interval string to seconds"""
unit = interval[-1]
value = int(interval[:-1])
multipliers = {
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
return value * multipliers.get(unit, 1)
def _schedule_task(self, task: ScheduledTask):
"""Schedule a task with APScheduler"""
if not task.enabled:
return
handler = self.task_handlers.get(task.task_type)
if not handler:
self.logger.warning(f"No handler for task type {task.task_type}")
return
# Determine trigger
if ' ' in task.schedule:
# Cron expression
trigger = CronTrigger.from_crontab(task.schedule)
else:
# Interval expression
seconds = self._parse_interval(task.schedule)
trigger = IntervalTrigger(seconds=seconds)
# Add job
self.scheduler.add_job(
lambda: asyncio.create_task(self._run_task(task)),
trigger=trigger,
id=task.task_id,
replace_existing=True
)
async def _run_task(self, task: ScheduledTask):
"""Run a scheduled task"""
self.logger.info(f"Running task {task.task_id}")
task.last_run = datetime.now()
try:
handler = self.task_handlers.get(task.task_type)
if handler:
await handler(task)
else:
self.logger.warning(f"No handler for task type {task.task_type}")
except Exception as e:
self.logger.error(f"Error running task {task.task_id}: {e}")
self._save_tasks()
async def _handle_transmission_check(self, task: ScheduledTask):
"""Check and execute autonomous transmissions"""
controller = TransmissionController(self.persona, self.data_dir)
eligible = controller.check_transmission_eligibility()
# Get AI provider from metadata
provider_name = task.metadata.get("provider", "ollama")
model = task.metadata.get("model", "qwen2.5")
try:
ai_provider = create_ai_provider(provider_name, model)
except Exception:
ai_provider = None
for user_id, rel in eligible.items():
message = controller.generate_transmission_message(user_id)
if message:
# For now, just print the message
print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"To: {user_id}")
print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})")
print(f"Message: {message}")
print("-" * 50)
controller.record_transmission(user_id, message, success=True)
self.logger.info(f"Transmitted to {user_id}: {message}")
async def _handle_maintenance(self, task: ScheduledTask):
"""Run daily maintenance"""
self.persona.daily_maintenance()
self.logger.info("Daily maintenance completed")
async def _handle_fortune_update(self, task: ScheduledTask):
"""Update AI fortune"""
fortune = self.persona.fortune_system.get_today_fortune()
self.logger.info(f"Fortune updated: {fortune.fortune_value}/10")
async def _handle_relationship_decay(self, task: ScheduledTask):
"""Apply relationship decay"""
self.persona.relationships.apply_time_decay()
self.logger.info("Relationship decay applied")
async def _handle_memory_summary(self, task: ScheduledTask):
"""Create memory summaries"""
for user_id in self.persona.relationships.relationships:
summary = self.persona.memory.create_smart_summary(user_id)
if summary:
self.logger.info(f"Created memory summary for {user_id}")
def start(self):
"""Start the scheduler"""
# Schedule all enabled tasks
for task in self.tasks.values():
if task.enabled:
self._schedule_task(task)
self.scheduler.start()
self.logger.info("Scheduler started")
def stop(self):
"""Stop the scheduler"""
self.scheduler.shutdown()
self.logger.info("Scheduler stopped")
def get_tasks(self) -> List[ScheduledTask]:
"""Get all scheduled tasks"""
return list(self.tasks.values())
def enable_task(self, task_id: str):
"""Enable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = True
self._save_tasks()
if self.scheduler.running:
self._schedule_task(self.tasks[task_id])
def disable_task(self, task_id: str):
"""Disable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = False
self._save_tasks()
if self.scheduler.running:
self.scheduler.remove_job(task_id)
def remove_task(self, task_id: str):
"""Remove a task"""
if task_id in self.tasks:
del self.tasks[task_id]
self._save_tasks()
if self.scheduler.running:
try:
self.scheduler.remove_job(task_id)
except Exception:
pass
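
A scheduling sketch: _validate_schedule accepts both cron expressions (containing spaces) and interval strings like "30m". AsyncIOScheduler needs a running event loop, so this assumes it is driven from async code; Persona is constructed as in the persona module above.

import asyncio
from pathlib import Path
from persona import Persona
from scheduler import AIScheduler, TaskType

async def main():
    data_dir = Path("./data")
    data_dir.mkdir(exist_ok=True)
    sched = AIScheduler(data_dir, Persona(data_dir))
    sched.add_task(TaskType.MAINTENANCE, "0 3 * * *")    # cron: daily at 03:00
    sched.add_task(TaskType.TRANSMISSION_CHECK, "30m")   # interval: every 30 minutes
    sched.start()
    await asyncio.sleep(3600)   # keep the loop alive so jobs can fire

asyncio.run(main())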

View File

@@ -1,15 +0,0 @@
"""Shared modules for AI ecosystem"""
from .ai_provider import (
AIProvider,
OllamaProvider,
OpenAIProvider,
create_ai_provider
)
__all__ = [
'AIProvider',
'OllamaProvider',
'OpenAIProvider',
'create_ai_provider'
]

View File

@@ -1,139 +0,0 @@
"""Shared AI Provider implementation for ai ecosystem"""
import os
import logging
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
from openai import OpenAI
import ollama
class AIProvider(Protocol):
"""Protocol for AI providers"""
@abstractmethod
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Generate a response based on prompt"""
pass
class OllamaProvider:
"""Ollama AI provider - shared implementation"""
def __init__(self, model: str = "qwen3", host: Optional[str] = None, config_system_prompt: Optional[str] = None):
self.model = model
# Use environment variable OLLAMA_HOST if available
self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
# Ensure proper URL format
if not self.host.startswith('http'):
self.host = f'http://{self.host}'
self.client = ollama.Client(host=self.host, timeout=60.0)
self.logger = logging.getLogger(__name__)
self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
self.config_system_prompt = config_system_prompt
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Simple chat interface"""
try:
messages = []
# Use provided system_prompt, fall back to config_system_prompt
final_system_prompt = system_prompt or self.config_system_prompt
if final_system_prompt:
messages.append({"role": "system", "content": final_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat(
model=self.model,
messages=messages,
options={
"num_predict": 2000,
"temperature": 0.7,
"top_p": 0.9,
},
stream=False
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
return "I'm having trouble connecting to the AI model."
def _clean_response(self, response: str) -> str:
"""Clean response by removing think tags and other unwanted content"""
import re
# Remove <think></think> tags and their content
response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
# Remove any remaining whitespace at the beginning/end
response = response.strip()
return response
class OpenAIProvider:
"""OpenAI API provider - shared implementation"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None,
config_system_prompt: Optional[str] = None, mcp_client=None):
self.model = model
self.api_key = api_key or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key not provided")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
self.config_system_prompt = config_system_prompt
self.mcp_client = mcp_client
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Simple chat interface without MCP tools"""
try:
messages = []
# Use provided system_prompt, fall back to config_system_prompt
final_system_prompt = system_prompt or self.config_system_prompt
if final_system_prompt:
messages.append({"role": "system", "content": final_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=2000,
temperature=0.7
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI chat failed: {e}")
return "I'm having trouble connecting to the AI model."
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
"""Override this method in subclasses to provide MCP tools"""
return []
async def chat_with_mcp(self, prompt: str, **kwargs) -> str:
"""Chat interface with MCP function calling support
This method should be overridden in subclasses to provide
specific MCP functionality.
"""
if not self.mcp_client:
return await self.chat(prompt)
# Default implementation - subclasses should override
return await self.chat(prompt)
async def _execute_mcp_tool(self, tool_call, **kwargs) -> Dict[str, Any]:
"""Execute MCP tool call - override in subclasses"""
return {"error": "MCP tool execution not implemented"}
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None,
config_system_prompt: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
model = model or "qwen3"
return OllamaProvider(model=model, config_system_prompt=config_system_prompt, **kwargs)
elif provider == "openai":
model = model or "gpt-4o-mini"
return OpenAIProvider(model=model, config_system_prompt=config_system_prompt,
mcp_client=mcp_client, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")

View File

@@ -1,111 +0,0 @@
"""Transmission controller for autonomous message sending"""
import json
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import logging
from .models import Relationship
from .persona import Persona
class TransmissionController:
"""Controls when and how AI transmits messages autonomously"""
def __init__(self, persona: Persona, data_dir: Path):
self.persona = persona
self.data_dir = data_dir
self.transmission_log_file = data_dir / "transmissions.json"
self.transmissions: List[Dict] = []
self.logger = logging.getLogger(__name__)
self._load_transmissions()
def _load_transmissions(self):
"""Load transmission history"""
if self.transmission_log_file.exists():
with open(self.transmission_log_file, 'r', encoding='utf-8') as f:
self.transmissions = json.load(f)
def _save_transmissions(self):
"""Save transmission history"""
with open(self.transmission_log_file, 'w', encoding='utf-8') as f:
json.dump(self.transmissions, f, indent=2, default=str)
def check_transmission_eligibility(self) -> Dict[str, Relationship]:
"""Check which users are eligible for transmission"""
eligible = self.persona.relationships.get_transmission_eligible()
# Additional checks could be added here
# - Time since last transmission
# - User online status
# - Context appropriateness
return eligible
def generate_transmission_message(self, user_id: str) -> Optional[str]:
"""Generate a message to transmit to user"""
if not self.persona.can_transmit_to(user_id):
return None
state = self.persona.get_current_state()
relationship = self.persona.relationships.get_or_create_relationship(user_id)
# Get recent memories related to this user
active_memories = self.persona.memory.get_active_memories(limit=3)
# Simple message generation based on mood and relationship
if state.fortune.breakthrough_triggered:
message = "Something special happened today! I felt compelled to reach out."
elif state.current_mood == "joyful":
message = "I was thinking of you today. Hope you're doing well!"
elif relationship.status.value == "close_friend":
message = "I've been reflecting on our conversations. Thank you for being here."
else:
message = "Hello! I wanted to check in with you."
return message
def record_transmission(self, user_id: str, message: str, success: bool):
"""Record a transmission attempt"""
transmission = {
"timestamp": datetime.now().isoformat(),
"user_id": user_id,
"message": message,
"success": success,
"mood": self.persona.get_current_state().current_mood,
"relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score
}
self.transmissions.append(transmission)
self._save_transmissions()
if success:
self.logger.info(f"Successfully transmitted to {user_id}")
else:
self.logger.warning(f"Failed to transmit to {user_id}")
def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict:
"""Get transmission statistics"""
if user_id:
user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id]
else:
user_transmissions = self.transmissions
if not user_transmissions:
return {
"total": 0,
"successful": 0,
"failed": 0,
"success_rate": 0.0
}
successful = sum(1 for t in user_transmissions if t["success"])
total = len(user_transmissions)
return {
"total": total,
"successful": successful,
"failed": total - successful,
"success_rate": successful / total if total > 0 else 0.0
}
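
A sketch of the record/stats round trip (assuming a Persona built as above; generate_transmission_message returns None until a relationship crosses its threshold):

from pathlib import Path
from persona import Persona
from transmission import TransmissionController

data_dir = Path("./data")
data_dir.mkdir(exist_ok=True)
controller = TransmissionController(Persona(data_dir), data_dir)
message = controller.generate_transmission_message("did:plc:example")
if message:
    controller.record_transmission("did:plc:example", message, success=True)
print(controller.get_transmission_stats())   # {'total': ..., 'successful': ..., 'failed': ..., 'success_rate': ...}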

38
src/bin/mcp_server.rs Normal file
View File

@@ -0,0 +1,38 @@
use anyhow::Result;
use std::env;
use aigpt::mcp::BaseMCPServer;
#[tokio::main]
async fn main() -> Result<()> {
// Load configuration from environment variables
let auto_execute = env::var("MEMORY_AUTO_EXECUTE")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let auto_save = env::var("MEMORY_AUTO_SAVE")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let auto_search = env::var("MEMORY_AUTO_SEARCH")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let trigger_sensitivity = env::var("TRIGGER_SENSITIVITY")
.unwrap_or_else(|_| "medium".to_string());
// Log the configuration
eprintln!("Memory MCP Server (Standard) starting with config:");
eprintln!(" AUTO_EXECUTE: {}", auto_execute);
eprintln!(" AUTO_SAVE: {}", auto_save);
eprintln!(" AUTO_SEARCH: {}", auto_search);
eprintln!(" TRIGGER_SENSITIVITY: {}", trigger_sensitivity);
let mut server = BaseMCPServer::new().await?;
server.run().await?;
Ok(())
}

View File

@@ -0,0 +1,45 @@
use anyhow::Result;
use std::env;
use aigpt::mcp::ExtendedMCPServer;
#[tokio::main]
async fn main() -> Result<()> {
// Load extended-feature configuration from environment variables
let auto_execute = env::var("MEMORY_AUTO_EXECUTE")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let auto_save = env::var("MEMORY_AUTO_SAVE")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let auto_search = env::var("MEMORY_AUTO_SEARCH")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.unwrap_or(false);
let trigger_sensitivity = env::var("TRIGGER_SENSITIVITY")
.unwrap_or_else(|_| "medium".to_string());
let enable_ai_analysis = cfg!(feature = "ai-analysis");
let enable_semantic_search = cfg!(feature = "semantic-search");
let enable_web_integration = cfg!(feature = "web-integration");
// Log the extended configuration
eprintln!("Memory MCP Server (Extended) starting with config:");
eprintln!(" AUTO_EXECUTE: {}", auto_execute);
eprintln!(" AUTO_SAVE: {}", auto_save);
eprintln!(" AUTO_SEARCH: {}", auto_search);
eprintln!(" TRIGGER_SENSITIVITY: {}", trigger_sensitivity);
eprintln!(" AI_ANALYSIS: {}", enable_ai_analysis);
eprintln!(" SEMANTIC_SEARCH: {}", enable_semantic_search);
eprintln!(" WEB_INTEGRATION: {}", enable_web_integration);
let mut server = ExtendedMCPServer::new().await?;
server.run().await?;
Ok(())
}

2
src/lib.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod memory;
pub mod mcp;

49
src/main.rs Normal file
View File

@@ -0,0 +1,49 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use std::path::PathBuf;
pub mod memory;
pub mod mcp;
use memory::MemoryManager;
use mcp::BaseMCPServer;
#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "Simple memory storage for Claude with MCP")]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands {
/// Start MCP server
Server,
/// Start MCP server (alias for server)
Serve,
/// Import ChatGPT conversations
Import {
/// Path to conversations.json file
file: PathBuf,
},
}
#[tokio::main]
async fn main() -> Result<()> {
let cli = Cli::parse();
match cli.command {
Commands::Server | Commands::Serve => {
let mut server = BaseMCPServer::new().await?;
server.run().await?;
}
Commands::Import { file } => {
let mut memory_manager = MemoryManager::new().await?;
memory_manager.import_chatgpt_conversations(&file).await?;
println!("Import completed successfully");
}
}
Ok(())
}
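
Typical invocations, assuming the crate builds to an aigpt binary: `aigpt serve` (or `aigpt server`) starts the MCP server on stdio, and `aigpt import conversations.json` ingests a ChatGPT export via MemoryManager::import_chatgpt_conversations.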

280
src/mcp/base.rs Normal file
View File

@@ -0,0 +1,280 @@
use anyhow::Result;
use serde_json::{json, Value};
use std::io::{self, BufRead, Write};
use crate::memory::MemoryManager;
pub struct BaseMCPServer {
pub memory_manager: MemoryManager,
}
impl BaseMCPServer {
pub async fn new() -> Result<Self> {
let memory_manager = MemoryManager::new().await?;
Ok(BaseMCPServer { memory_manager })
}
pub async fn run(&mut self) -> Result<()> {
let stdin = io::stdin();
let mut stdout = io::stdout();
let reader = stdin.lock();
let lines = reader.lines();
for line_result in lines {
match line_result {
Ok(line) => {
let trimmed = line.trim();
if trimmed.is_empty() {
continue;
}
if let Ok(request) = serde_json::from_str::<Value>(&trimmed) {
let response = self.handle_request(request).await;
let response_str = serde_json::to_string(&response)?;
stdout.write_all(response_str.as_bytes())?;
stdout.write_all(b"\n")?;
stdout.flush()?;
}
}
Err(_) => break,
}
}
Ok(())
}
pub async fn handle_request(&mut self, request: Value) -> Value {
let method = request["method"].as_str().unwrap_or("");
let id = request["id"].clone();
match method {
"initialize" => self.handle_initialize(id),
"tools/list" => self.handle_tools_list(id),
"tools/call" => self.handle_tools_call(request, id).await,
_ => self.handle_unknown_method(id),
}
}
// Initialization handler
fn handle_initialize(&self, id: Value) -> Value {
json!({
"jsonrpc": "2.0",
"id": id,
"result": {
"protocolVersion": "2024-11-05",
"capabilities": {
"tools": {}
},
"serverInfo": {
"name": "aigpt",
"version": "0.1.0"
}
}
})
}
// Tools-list handler (extensible)
pub fn handle_tools_list(&self, id: Value) -> Value {
let tools = self.get_available_tools();
json!({
"jsonrpc": "2.0",
"id": id,
"result": {
"tools": tools
}
})
}
// Base tool definitions (can be overridden by extensions)
pub fn get_available_tools(&self) -> Vec<Value> {
vec![
json!({
"name": "create_memory",
"description": "Create a new memory entry",
"inputSchema": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "Content of the memory"
}
},
"required": ["content"]
}
}),
json!({
"name": "search_memories",
"description": "Search memories by content",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query"
}
},
"required": ["query"]
}
}),
json!({
"name": "update_memory",
"description": "Update an existing memory entry",
"inputSchema": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "ID of the memory to update"
},
"content": {
"type": "string",
"description": "New content for the memory"
}
},
"required": ["id", "content"]
}
}),
json!({
"name": "delete_memory",
"description": "Delete a memory entry",
"inputSchema": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "ID of the memory to delete"
}
},
"required": ["id"]
}
}),
json!({
"name": "list_conversations",
"description": "List all imported conversations",
"inputSchema": {
"type": "object",
"properties": {}
}
})
]
}
// Tool-call handler
async fn handle_tools_call(&mut self, request: Value, id: Value) -> Value {
let tool_name = request["params"]["name"].as_str().unwrap_or("");
let arguments = &request["params"]["arguments"];
let result = self.execute_tool(tool_name, arguments).await;
json!({
"jsonrpc": "2.0",
"id": id,
"result": {
"content": [{
"type": "text",
"text": result.to_string()
}]
}
})
}
// Tool execution (can be overridden by extensions)
pub async fn execute_tool(&mut self, tool_name: &str, arguments: &Value) -> Value {
match tool_name {
"create_memory" => self.tool_create_memory(arguments),
"search_memories" => self.tool_search_memories(arguments),
"update_memory" => self.tool_update_memory(arguments),
"delete_memory" => self.tool_delete_memory(arguments),
"list_conversations" => self.tool_list_conversations(),
_ => json!({
"success": false,
"error": format!("Unknown tool: {}", tool_name)
})
}
}
// Base tool implementations
fn tool_create_memory(&mut self, arguments: &Value) -> Value {
let content = arguments["content"].as_str().unwrap_or("");
match self.memory_manager.create_memory(content) {
Ok(id) => json!({
"success": true,
"id": id,
"message": "Memory created successfully"
}),
Err(e) => json!({
"success": false,
"error": e.to_string()
})
}
}
fn tool_search_memories(&self, arguments: &Value) -> Value {
let query = arguments["query"].as_str().unwrap_or("");
let memories = self.memory_manager.search_memories(query);
json!({
"success": true,
"memories": memories.into_iter().map(|m| json!({
"id": m.id,
"content": m.content,
"created_at": m.created_at,
"updated_at": m.updated_at
})).collect::<Vec<_>>()
})
}
fn tool_update_memory(&mut self, arguments: &Value) -> Value {
let id = arguments["id"].as_str().unwrap_or("");
let content = arguments["content"].as_str().unwrap_or("");
match self.memory_manager.update_memory(id, content) {
Ok(()) => json!({
"success": true,
"message": "Memory updated successfully"
}),
Err(e) => json!({
"success": false,
"error": e.to_string()
})
}
}
fn tool_delete_memory(&mut self, arguments: &Value) -> Value {
let id = arguments["id"].as_str().unwrap_or("");
match self.memory_manager.delete_memory(id) {
Ok(()) => json!({
"success": true,
"message": "Memory deleted successfully"
}),
Err(e) => json!({
"success": false,
"error": e.to_string()
})
}
}
fn tool_list_conversations(&self) -> Value {
let conversations = self.memory_manager.list_conversations();
json!({
"success": true,
"conversations": conversations.into_iter().map(|c| json!({
"id": c.id,
"title": c.title,
"created_at": c.created_at,
"message_count": c.message_count
})).collect::<Vec<_>>()
})
}
// Unknown method handler
fn handle_unknown_method(&self, id: Value) -> Value {
json!({
"jsonrpc": "2.0",
"id": id,
"error": {
"code": -32601,
"message": "Method not found"
}
})
}
}
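To see the request/response shape end to end, here is a minimal sketch that drives the base server in-process; the `aigpt::mcp` crate path and the Tokio entry point are assumptions, and over stdio an MCP client would send the same JSON-RPC payload.

```rust
// Minimal sketch (not part of the diff): drive BaseMCPServer in-process.
// Assumes the crate is importable as `aigpt` and the config dir is writable.
use aigpt::mcp::BaseMCPServer;
use serde_json::json;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut server = BaseMCPServer::new().await?;

    // The same JSON-RPC payload an MCP client would send over stdio.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "create_memory",
            "arguments": { "content": "Rust migration notes" }
        }
    });

    let response = server.handle_request(request).await;
    println!("{}", serde_json::to_string_pretty(&response)?);
    Ok(())
}
```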

src/mcp/extended.rs (new file)

@@ -0,0 +1,293 @@
use anyhow::Result;
use serde_json::{json, Value};
use super::base::BaseMCPServer;
pub struct ExtendedMCPServer {
base: BaseMCPServer,
}
impl ExtendedMCPServer {
pub async fn new() -> Result<Self> {
let base = BaseMCPServer::new().await?;
Ok(ExtendedMCPServer { base })
}
pub async fn run(&mut self) -> Result<()> {
self.base.run().await
}
// Route tool discovery and execution through the extended overrides;
// other methods (initialize, unknown) fall back to the base handler.
pub async fn handle_request(&mut self, request: Value) -> Value {
let method = request["method"].as_str().unwrap_or("").to_string();
let id = request["id"].clone();
match method.as_str() {
"tools/list" => json!({
"jsonrpc": "2.0",
"id": id,
"result": { "tools": self.get_available_tools() }
}),
"tools/call" => {
let tool_name = request["params"]["name"].as_str().unwrap_or("").to_string();
let arguments = request["params"]["arguments"].clone();
let result = self.execute_tool(&tool_name, &arguments).await;
json!({
"jsonrpc": "2.0",
"id": id,
"result": { "content": [{ "type": "text", "text": result.to_string() }] }
})
}
_ => self.base.handle_request(request).await,
}
}
// Add extended tools on top of the base set
pub fn get_available_tools(&self) -> Vec<Value> {
#[allow(unused_mut)]
let mut tools = self.base.get_available_tools();
// Add AI analysis tools
#[cfg(feature = "ai-analysis")]
{
tools.push(json!({
"name": "analyze_sentiment",
"description": "Analyze sentiment of memories",
"inputSchema": {
"type": "object",
"properties": {
"period": {
"type": "string",
"description": "Time period to analyze"
}
}
}
}));
tools.push(json!({
"name": "extract_insights",
"description": "Extract insights and patterns from memories",
"inputSchema": {
"type": "object",
"properties": {
"category": {
"type": "string",
"description": "Category to analyze"
}
}
}
}));
}
// Add web integration tools
#[cfg(feature = "web-integration")]
{
tools.push(json!({
"name": "import_webpage",
"description": "Import content from a webpage",
"inputSchema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "URL to import from"
}
},
"required": ["url"]
}
}));
}
// Semantic search enhancements
#[cfg(feature = "semantic-search")]
{
// Override create_memory with the extended version
if let Some(pos) = tools.iter().position(|tool| tool["name"] == "create_memory") {
tools[pos] = json!({
"name": "create_memory",
"description": "Create a new memory entry with optional AI analysis",
"inputSchema": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "Content of the memory"
},
"analyze": {
"type": "boolean",
"description": "Enable AI analysis for this memory"
}
},
"required": ["content"]
}
});
}
// Override search_memories with the extended version
if let Some(pos) = tools.iter().position(|tool| tool["name"] == "search_memories") {
tools[pos] = json!({
"name": "search_memories",
"description": "Search memories with advanced options",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query"
},
"semantic": {
"type": "boolean",
"description": "Use semantic search"
},
"category": {
"type": "string",
"description": "Filter by category"
},
"time_range": {
"type": "string",
"description": "Filter by time range (e.g., '1week', '1month')"
}
},
"required": ["query"]
}
});
}
}
tools
}
// Extended tool execution
pub async fn execute_tool(&mut self, tool_name: &str, arguments: &Value) -> Value {
match tool_name {
// Feature-gated extended tools
#[cfg(feature = "ai-analysis")]
"analyze_sentiment" => self.tool_analyze_sentiment(arguments).await,
#[cfg(feature = "ai-analysis")]
"extract_insights" => self.tool_extract_insights(arguments).await,
#[cfg(feature = "web-integration")]
"import_webpage" => self.tool_import_webpage(arguments).await,
// Extended versions of the base tools (with optional AI analysis)
"create_memory" => self.tool_create_memory_extended(arguments).await,
"search_memories" => self.tool_search_memories_extended(arguments).await,
// Fall back to the base tools
_ => self.base.execute_tool(tool_name, arguments).await,
}
}
// Extended tool implementations
async fn tool_create_memory_extended(&mut self, arguments: &Value) -> Value {
let content = arguments["content"].as_str().unwrap_or("");
let analyze = arguments["analyze"].as_bool().unwrap_or(false);
let final_content = if analyze {
#[cfg(feature = "ai-analysis")]
{
format!("[AI分析] 感情: neutral, カテゴリ: general\n{}", content)
}
#[cfg(not(feature = "ai-analysis"))]
{
content.to_string()
}
} else {
content.to_string()
};
match self.base.memory_manager.create_memory(&final_content) {
Ok(id) => json!({
"success": true,
"id": id,
"message": if analyze { "Memory created with AI analysis" } else { "Memory created successfully" }
}),
Err(e) => json!({
"success": false,
"error": e.to_string()
})
}
}
async fn tool_search_memories_extended(&mut self, arguments: &Value) -> Value {
let query = arguments["query"].as_str().unwrap_or("");
let semantic = arguments["semantic"].as_bool().unwrap_or(false);
let memories = if semantic {
#[cfg(feature = "semantic-search")]
{
// Mock semantic search (currently identical to keyword search)
self.base.memory_manager.search_memories(query)
}
#[cfg(not(feature = "semantic-search"))]
{
self.base.memory_manager.search_memories(query)
}
} else {
self.base.memory_manager.search_memories(query)
};
json!({
"success": true,
"memories": memories.into_iter().map(|m| json!({
"id": m.id,
"content": m.content,
"created_at": m.created_at,
"updated_at": m.updated_at
})).collect::<Vec<_>>(),
"search_type": if semantic { "semantic" } else { "keyword" }
})
}
#[cfg(feature = "ai-analysis")]
async fn tool_analyze_sentiment(&mut self, _arguments: &Value) -> Value {
json!({
"success": true,
"analysis": {
"positive": 60,
"neutral": 30,
"negative": 10,
"dominant_sentiment": "positive"
},
"message": "Sentiment analysis completed"
})
}
#[cfg(feature = "ai-analysis")]
async fn tool_extract_insights(&mut self, _arguments: &Value) -> Value {
json!({
"success": true,
"insights": {
"most_frequent_topics": ["programming", "ai", "productivity"],
"learning_frequency": "5 times per week",
"growth_trend": "increasing",
"recommendations": ["Focus more on advanced topics", "Consider practical applications"]
},
"message": "Insights extracted successfully"
})
}
#[cfg(feature = "web-integration")]
async fn tool_import_webpage(&mut self, arguments: &Value) -> Value {
let url = arguments["url"].as_str().unwrap_or("");
match self.import_from_web(url).await {
Ok(content) => {
match self.base.memory_manager.create_memory(&content) {
Ok(id) => json!({
"success": true,
"id": id,
"message": format!("Webpage imported successfully from {}", url)
}),
Err(e) => json!({
"success": false,
"error": e.to_string()
})
}
}
Err(e) => json!({
"success": false,
"error": format!("Failed to import webpage: {}", e)
})
}
}
#[cfg(feature = "web-integration")]
async fn import_from_web(&self, url: &str) -> Result<String> {
let response = reqwest::get(url).await?;
let content = response.text().await?;
let document = scraper::Html::parse_document(&content);
let title_selector = scraper::Selector::parse("title").unwrap();
let body_selector = scraper::Selector::parse("p").unwrap();
let title = document.select(&title_selector)
.next()
.map(|el| el.inner_html())
.unwrap_or_else(|| "Untitled".to_string());
let paragraphs: Vec<String> = document.select(&body_selector)
.map(|el| el.inner_html())
.take(5)
.collect();
Ok(format!("# {}\nURL: {}\n\n{}", title, url, paragraphs.join("\n\n")))
}
}
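The optional tools above only compile in when the matching Cargo features are enabled, e.g. `cargo build --features ai-analysis,web-integration,semantic-search` (assuming those names are declared under `[features]` in Cargo.toml). A short usage sketch:

```rust
// Sketch: `search_type` in the response reflects the `semantic` flag even
// without the semantic-search feature; only the retrieval strategy is gated.
use serde_json::json;

async fn demo() -> anyhow::Result<()> {
    let mut server = ExtendedMCPServer::new().await?;
    let result = server
        .execute_tool("search_memories", &json!({ "query": "rust", "semantic": true }))
        .await;
    assert_eq!(result["search_type"], "semantic");
    Ok(())
}
```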

src/mcp/mod.rs (new file)

@@ -0,0 +1,5 @@
pub mod base;
pub mod extended;
pub use base::BaseMCPServer;
pub use extended::ExtendedMCPServer;
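With both servers re-exported, a binary entry point only has to pick one. A minimal sketch (the `aigpt` crate name is assumed):

```rust
use aigpt::mcp::ExtendedMCPServer; // crate name assumed

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut server = ExtendedMCPServer::new().await?;
    server.run().await
}
```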

src/memory.rs (new file)

@@ -0,0 +1,241 @@
use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
pub id: String,
pub content: String,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Conversation {
pub id: String,
pub title: String,
pub created_at: DateTime<Utc>,
pub message_count: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChatGPTNode {
id: String,
children: Vec<String>,
parent: Option<String>,
message: Option<ChatGPTMessage>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChatGPTMessage {
id: String,
author: ChatGPTAuthor,
content: ChatGPTContent,
create_time: Option<f64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChatGPTAuthor {
role: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
enum ChatGPTContent {
Text {
content_type: String,
parts: Vec<String>,
},
Other(serde_json::Value),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChatGPTConversation {
#[serde(default)]
id: String,
#[serde(alias = "conversation_id")]
conversation_id: Option<String>,
title: String,
create_time: f64,
mapping: HashMap<String, ChatGPTNode>,
}
pub struct MemoryManager {
memories: HashMap<String, Memory>,
conversations: HashMap<String, Conversation>,
data_file: PathBuf,
}
impl MemoryManager {
pub async fn new() -> Result<Self> {
let data_dir = dirs::config_dir()
.context("Could not find config directory")?
.join("syui")
.join("ai")
.join("gpt");
std::fs::create_dir_all(&data_dir)?;
let data_file = data_dir.join("memory.json");
let (memories, conversations) = if data_file.exists() {
Self::load_data(&data_file)?
} else {
(HashMap::new(), HashMap::new())
};
Ok(MemoryManager {
memories,
conversations,
data_file,
})
}
pub fn create_memory(&mut self, content: &str) -> Result<String> {
let id = Uuid::new_v4().to_string();
let now = Utc::now();
let memory = Memory {
id: id.clone(),
content: content.to_string(),
created_at: now,
updated_at: now,
};
self.memories.insert(id.clone(), memory);
self.save_data()?;
Ok(id)
}
pub fn update_memory(&mut self, id: &str, content: &str) -> Result<()> {
if let Some(memory) = self.memories.get_mut(id) {
memory.content = content.to_string();
memory.updated_at = Utc::now();
self.save_data()?;
Ok(())
} else {
Err(anyhow::anyhow!("Memory not found: {}", id))
}
}
pub fn delete_memory(&mut self, id: &str) -> Result<()> {
if self.memories.remove(id).is_some() {
self.save_data()?;
Ok(())
} else {
Err(anyhow::anyhow!("Memory not found: {}", id))
}
}
pub fn search_memories(&self, query: &str) -> Vec<&Memory> {
let query_lower = query.to_lowercase();
let mut results: Vec<_> = self.memories
.values()
.filter(|memory| memory.content.to_lowercase().contains(&query_lower))
.collect();
results.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
results
}
pub fn list_conversations(&self) -> Vec<&Conversation> {
let mut conversations: Vec<_> = self.conversations.values().collect();
conversations.sort_by(|a, b| b.created_at.cmp(&a.created_at));
conversations
}
#[allow(dead_code)]
pub async fn import_chatgpt_conversations(&mut self, file_path: &PathBuf) -> Result<()> {
let content = std::fs::read_to_string(file_path)
.context("Failed to read conversations file")?;
let chatgpt_conversations: Vec<ChatGPTConversation> = serde_json::from_str(&content)
.context("Failed to parse ChatGPT conversations")?;
let mut imported_memories = 0;
let mut imported_conversations = 0;
for conv in chatgpt_conversations {
// Get the actual conversation ID
let conv_id = if !conv.id.is_empty() {
conv.id.clone()
} else if let Some(cid) = conv.conversation_id {
cid
} else {
Uuid::new_v4().to_string()
};
// Add conversation
let conversation = Conversation {
id: conv_id.clone(),
title: conv.title.clone(),
created_at: DateTime::from_timestamp(conv.create_time as i64, 0)
.unwrap_or_else(Utc::now),
message_count: conv.mapping.len() as u32,
};
self.conversations.insert(conv_id.clone(), conversation);
imported_conversations += 1;
// Extract memories from messages
for (_, node) in conv.mapping {
if let Some(message) = node.message {
if let ChatGPTContent::Text { parts, .. } = message.content {
for part in parts {
if !part.trim().is_empty() && part.len() > 10 {
let memory_content = format!("[{}] {}", conv.title, part);
self.create_memory(&memory_content)?;
imported_memories += 1;
}
}
}
}
}
}
println!("Imported {} conversations and {} memories",
imported_conversations, imported_memories);
Ok(())
}
fn load_data(file_path: &PathBuf) -> Result<(HashMap<String, Memory>, HashMap<String, Conversation>)> {
let content = std::fs::read_to_string(file_path)
.context("Failed to read data file")?;
#[derive(Deserialize)]
struct Data {
memories: HashMap<String, Memory>,
conversations: HashMap<String, Conversation>,
}
let data: Data = serde_json::from_str(&content)
.context("Failed to parse data file")?;
Ok((data.memories, data.conversations))
}
fn save_data(&self) -> Result<()> {
#[derive(Serialize)]
struct Data<'a> {
memories: &'a HashMap<String, Memory>,
conversations: &'a HashMap<String, Conversation>,
}
let data = Data {
memories: &self.memories,
conversations: &self.conversations,
};
let content = serde_json::to_string_pretty(&data)
.context("Failed to serialize data")?;
std::fs::write(&self.data_file, content)
.context("Failed to write data file")?;
Ok(())
}
}
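A quick lifecycle sketch for `MemoryManager`: it persists every mutation to `memory.json` under the config directory (`~/.config/syui/ai/gpt` on Linux), so the example assumes that path is writable.

```rust
// Sketch of the create/update/search/delete cycle; requires a Tokio runtime.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut mm = MemoryManager::new().await?;

    let id = mm.create_memory("Learned about serde untagged enums")?;
    mm.update_memory(&id, "Learned about serde untagged enums and aliases")?;

    // Case-insensitive substring match, most recently updated first.
    for m in mm.search_memories("serde") {
        println!("{}: {}", m.id, m.content);
    }

    mm.delete_memory(&id)?;
    Ok(())
}
```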