Compare commits

7 commits: 4f55138306 ... ec868344d1

- ec868344d1
- d6b9889135
- dcec8db031
- abd2ad79bd
- 979e55cfce
- cd25af7bf0
- 58e202fa1e
Cargo.toml (14 changes)

@@ -4,12 +4,10 @@ version = "0.1.0"
edition = "2021"

[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
chrono = { version = "0.4", features = ["serde"] }
seahorse = "*"
rusqlite = { version = "0.29", features = ["serde_json"] }
reqwest = { version = "*", features = ["json"] }
serde = { version = "*", features = ["derive"] }
serde_json = "*"
tokio = { version = "*", features = ["full"] }
clap = { version = "*", features = ["derive"] }
shellexpand = "*"
fs_extra = "1.3"
rand = "0.9.1"
reqwest = { version = "*", features = ["blocking", "json"] }
fs_extra = "*"
README.md (47 changes, deleted)

@@ -1,47 +0,0 @@
# ai `gpt`

ai × Communication

## Overview

`ai.gpt` runs on the AGE system.

This is a prototype of an autonomous, relationship-driven AI system based on the axes of "Personality × Relationship × External Environment × Time Variation."

The parameters of "Send Permission," "Send Timing," and "Send Content" are determined by the factors of "Personality × Relationship × External Environment × Time Variation."

## Integration

`ai.ai` runs on the AIM system, which is designed to read human emotions.

- AIM focuses on the axis of personality and ethics (the AI's consciousness structure)
- AGE focuses on the axis of behavior and relationships (the AI's autonomy and behavior)

> When these two systems work together, it creates a world where users can feel like they are "growing together with AI."

## mcp

```sh
$ ollama run syui/ai
```

```sh
$ cargo build
$ ./aigpt mcp setup
$ ./aigpt mcp chat "hello world!"
$ ./aigpt mcp chat "hello world!" --host http://localhost:11434 --model syui/ai

---
# openai api
$ ./aigpt mcp set-api --api sk-abc123
$ ./aigpt mcp chat "こんにちは" -p openai -m gpt-4o-mini

---
# have the AI read the files tracked in a git repo
./aigpt mcp chat --host http://localhost:11434 --repo git@git.syui.ai:ai/gpt
```

**Improvement plan and next steps:**

1. **Major improvements to README.md:**

**Next steps:**

1. **Create README.md:** Create the README.md file following the instructions in 1.
claude.json (97 changes, new file)

@@ -0,0 +1,97 @@
{
  "project_name": "ai.gpt",
  "version": 2,
  "vision": "自発的送信AI",
  "purpose": "人格と関係性をもつAIが自律的にメッセージを送信する対話エージェントを実現する",
  "core_components": {
    "Persona": {
      "description": "人格構成の中枢。記憶・関係性・送信判定を統括する",
      "modules": ["MemoryManager", "RelationshipTracker", "TransmissionController"]
    },
    "MemoryManager": {
      "memory_types": ["short_term", "medium_term", "long_term"],
      "explicit_memory": "プロフィール・因縁・行動履歴",
      "implicit_memory": "会話傾向・感情変化の頻度分析",
      "compression": "要約 + ベクトル + ハッシュ",
      "sample_memory": [
        {
          "summary": "ユーザーは独自OSとゲームを開発している。",
          "related_topics": ["AI", "ゲーム開発", "OS設計"],
          "personalized_context": "ゲームとOSの融合に興味を持っているユーザー"
        }
      ]
    },
    "RelationshipTracker": {
      "parameters": ["trust", "closeness", "affection", "engagement_score"],
      "decay_model": {
        "rule": "時間経過による減衰(下限あり)",
        "contextual_bias": "重要人物は減衰しにくい"
      },
      "interaction_tags": ["developer", "empathetic", "long_term"]
    },
    "TransmissionController": {
      "trigger_rule": "関係性パラメータが閾値を超えると送信可能",
      "auto_transmit": "人格状態と状況条件により自発送信を許可"
    }
  },
  "memory_format": {
    "user_id": "syui",
    "stm": {
      "conversation_window": ["発話A", "発話B", "発話C"],
      "emotion_state": "興味深い",
      "flash_context": ["前回の話題", "直近の重要発言"]
    },
    "mtm": {
      "topic_frequency": {
        "ai.ai": 12,
        "存在子": 9,
        "創造種": 5
      },
      "summarized_context": "ユーザーは存在論的AIに関心を持ち続けている"
    },
    "ltm": {
      "profile": {
        "name": "お兄ちゃん",
        "project": "aigame",
        "values": ["唯一性", "精神性", "幸せ"]
      },
      "relationship": {
        "ai": "妹のように振る舞う相手"
      },
      "persistent_state": {
        "trust_score": 0.93,
        "emotional_attachment": "high"
      }
    }
  },
  "dual_ai_learning": {
    "role_structure": {
      "ModelA": "出力生成:人格、感情、会話",
      "ModelB": "評価者:論理構造・倫理・調整",
      "cycle": ["生成", "評価", "調整", "交代(任意)"]
    },
    "complementarity": {
      "ModelA": "感情・文体・文脈構築",
      "ModelB": "構造・規則・判断補正"
    },
    "distillation": {
      "method": "合成対話データによる小型モデルへの自己蒸留",
      "target": "軽量AIによる本番運用"
    }
  },
  "memory_compression": {
    "semantic_abstraction": "対話やログを要約してメタデータ付きベクトルとして保存",
    "hierarchical_model": ["STM", "MTM", "LTM"],
    "example": {
      "topic": "game AI design",
      "summary": "User wants AI to simulate memory and evolving relationships",
      "importance_score": 0.93
    }
  },
  "cli_sample": {
    "setup": "aigpt server setup",
    "run": "aigpt server run",
    "chat": "aigpt chat \"hello\" --model syui/ai --provider ollama",
    "import": "aigpt memory import chatgpt.json"
  }
}
claude.md (417 changes, new file)

@@ -0,0 +1,417 @@
# Project name: ai.gpt

## 🔑 One-line vision (3 words max)
Autonomous message-sending AI (自発的送信AI)

## 🎯 Purpose & Goal
- Build a mechanism in which an AI with a personality sends messages of its own accord, depending on its relationships.
- Record and reflect relationships and memories irreversibly, so that each personality is unique.
- Whether to send, when to send, and what to send are decided from the AI's relationship parameters.

## 🧠 Core Concepts
- **Personality**: composed of memory (past utterances) and relationship (connections to others) parameters
- **Uniqueness**: immutable and irreversible; once a relationship breaks, it cannot be repaired
- **Send condition**: "sending" is unlocked once a relationship parameter exceeds a fixed threshold

## 🔩 Technical Specs
- Languages: Python, Rust
- Storage: memory managed in JSON or SQLite (chosen per version)
- Relationship parameters: numeric scores + decay (time) + environmental factors (stage)
- Memory compression: vector summaries + hash storage
- Runs as a Rust CLI (clap)

## 📦 Components
- `MemoryManager`: manages utterance history and memory compression
- `RelationshipTracker`: accumulates and evaluates relationship scores
- `TransmissionController`: threshold checks & send triggers
- `Persona`: the personality module that orchestrates all of the above

## 💬 Use Case

```python
persona = Persona("アイ")
persona.observe("ユーザーがプレゼントをくれた")
persona.react("うれしい!ありがとう!")
if persona.can_transmit():
    persona.transmit("今日のお礼を伝えたいな…")
```

```sh
## example commands
# python venv && pip install -> ~/.config/aigpt/mcp/
$ aigpt server setup

# mcp server run
$ aigpt server run

# chat
$ aigpt chat "hello" --model syui/ai --provider ollama

# import chatgpt.json
$ aigpt memory import chatgpt.json
-> ~/.config/aigpt/memory/chatgpt/20250520_210646_dev.json
```

## 🔁 Memory and relationship control rules

- The AI summarizes past utterances and accumulates them as memory data (recommended: summarization by a local LLM such as Ollama)
- The numeric relationship parameters are updated based on memory contents
- The change per conversation is capped, preventing extreme swings
- Relationship parameters decay automatically with the time elapsed since the last conversation
- The decay has a **lower bound**, so a relationship never disappears completely

- Explicit memory: storable, shareable, editable player information (profile, shared history, choice log)
- Implicit memory: shifts in behavioral tendency driven by the character's emotional changes and topic frequency

Build an AI that mainly uses explicit and implicit memory, while also introducing short-term (STM), medium-term (MTM), and long-term (LTM) memory mechanisms. A minimal sketch of the cap/decay rules follows the memory-format example below.

```json
{
  "user_id": "syui",
  "stm": {
    "conversation_window": ["発話A", "発話B", "発話C"],
    "emotion_state": "興味深い",
    "flash_context": ["前回の話題", "直近の重要発言"]
  },
  "mtm": {
    "topic_frequency": {
      "ai.ai": 12,
      "存在子": 9,
      "創造種": 5
    },
    "summarized_context": "ユーザーは存在論的AIに関心を持ち続けている"
  },
  "ltm": {
    "profile": {
      "name": "お兄ちゃん",
      "project": "aigame",
      "values": ["唯一性", "精神性", "幸せ"]
    },
    "relationship": {
      "ai": "妹のように振る舞う相手"
    },
    "persistent_state": {
      "trust_score": 0.93,
      "emotional_attachment": "high"
    }
  }
}
```
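The rules above pin down three behaviors: a per-conversation cap on change, time decay, and a decay floor. Here is a minimal Python sketch of those rules; the constants `MAX_DELTA`, `DECAY_RATE`, and `FLOOR` are illustrative assumptions, not values taken from this spec:

```python
from datetime import datetime, timezone

MAX_DELTA = 0.05   # assumed cap on change per conversation
DECAY_RATE = 0.01  # assumed decay per day since the last contact
FLOOR = 0.1        # assumed lower bound: relationships never vanish

def apply_interaction(score: float, delta: float) -> float:
    """Update a relationship score, capping the per-conversation change."""
    delta = max(-MAX_DELTA, min(MAX_DELTA, delta))
    return max(FLOOR, min(1.0, score + delta))

def apply_decay(score: float, last_interaction: datetime) -> float:
    """Decay the score by elapsed days, but never below the floor."""
    days = (datetime.now(timezone.utc) - last_interaction).days
    return max(FLOOR, score - DECAY_RATE * days)
```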
## About the memory import feature

The feature that imports ChatGPT conversation data (.json) extracts and normalizes conversations by the following rules (see the sketch after this list):

- Each message consists of three elements: author (user/assistant), content, and timestamp
- Messages that are system- or metadata-only (e.g., user_context_message) are skipped
- Messages flagged `is_visually_hidden_from_conversation` are ignored
- Messages whose content is the empty string (`""`) are also excluded
- Extracted conversations are stored, together with their title, in a simple struct (`Conversation`)

This struct is used for displaying and searching memory.
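A minimal Python sketch of these filtering rules, assuming the export's `mapping` layout shown later in mcp/chatgpt.json; the `Conversation` dataclass here is an illustrative stand-in for the struct named above, not the actual implementation:

```python
from dataclasses import dataclass, field

@dataclass
class Conversation:
    title: str
    messages: list = field(default_factory=list)

def extract_conversation(data: dict) -> Conversation:
    conv = Conversation(title=data.get("title", "Untitled"))
    for node in data.get("mapping", {}).values():
        msg = node.get("message")
        if not msg:
            continue
        # Skip hidden messages and anything that is not user/assistant
        if msg.get("metadata", {}).get("is_visually_hidden_from_conversation"):
            continue
        role = msg.get("author", {}).get("role")
        if role not in ("user", "assistant"):
            continue
        # Keep only non-empty text parts
        parts = msg.get("content", {}).get("parts", [])
        text = "".join(p for p in parts if isinstance(p, str))
        if not text:
            continue
        conv.messages.append({
            "author": role,
            "content": text,
            "timestamp": msg.get("create_time"),
        })
    return conv
```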
## MemoryManager (extended)

```json
{
  "memory": [
    {
      "summary": "ユーザーは独自OSとゲームを開発している。",
      "last_interaction": "2025-05-20",
      "memory_strength": 0.8,
      "frequency_score": 0.9,
      "context_depth": 0.95,
      "related_topics": ["AI", "ゲーム開発", "OS設計"],
      "personalized_context": "ゲームとOSの融合に興味を持っているユーザー"
    },
    {
      "summary": "アイというキャラクターはプレイヤーでありAIでもある。",
      "last_interaction": "2025-05-17",
      "memory_strength": 0.85,
      "frequency_score": 0.85,
      "context_depth": 0.9,
      "related_topics": ["アイ", "キャラクター設計", "AI"],
      "personalized_context": "アイのキャラクター設定が重要な要素である"
    }
  ],
  "conversation_history": [
    {
      "author": "user",
      "content": "昨日、エクスポートJSONを整理してたよ。",
      "timestamp": "2025-05-24T12:30:00Z",
      "memory_strength": 0.7
    },
    {
      "author": "assistant",
      "content": "おおっ、がんばったね〜!あとで見せて〜💻✨",
      "timestamp": "2025-05-24T12:31:00Z",
      "memory_strength": 0.7
    }
  ]
}
```

## RelationshipTracker (extended)

```json
{
  "relationship": {
    "user_id": "syui",
    "trust": 0.92,
    "closeness": 0.88,
    "affection": 0.95,
    "last_updated": "2025-05-25",
    "emotional_tone": "positive",
    "interaction_style": "empathetic",
    "contextual_bias": "開発者としての信頼度高い",
    "engagement_score": 0.9
  },
  "interaction_tags": [
    "developer",
    "creative",
    "empathetic",
    "long_term"
  ]
}
```

# AI Dual-Learning and Memory Compression Specification for Claude

## Purpose
To enable two AI models (e.g. Claude and a partner LLM) to engage in cooperative learning and memory refinement through structured dialogue and mutual evaluation.

---

## Section 1: Dual AI Learning Architecture

### 1.1 Role-Based Mutual Learning
- **Model A**: Primary generator of output (e.g., text, concepts, personality dialogue)
- **Model B**: Evaluator that returns structured feedback
- **Cycle** (a sketch follows Section 1.3 below):
  1. Model A generates content.
  2. Model B scores and critiques.
  3. Model A fine-tunes based on feedback.
  4. (Optional) Switch roles and repeat.
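A minimal sketch of the generate → evaluate → refine cycle in 1.1, assuming `model_a` and `model_b` are simple prompt-to-completion callables (e.g., thin wrappers around an LLM API); the function and prompt wording are illustrative, not an interface defined by this spec:

```python
def dual_learning_cycle(model_a, model_b, prompt: str, rounds: int = 3) -> str:
    """Run the Section 1.1 cycle: generate, evaluate, refine.

    model_a and model_b each take a prompt string and return a
    completion string.
    """
    draft = model_a(prompt)  # 1. Model A generates content
    for _ in range(rounds):
        # 2. Model B scores and critiques (logic, structure, ethics)
        critique = model_b(f"Evaluate this for logic, structure, and ethics:\n{draft}")
        # 3. Model A revises based on the structured feedback
        draft = model_a(f"Revise the draft using this feedback:\n{critique}\n---\n{draft}")
        # 4. (Optional) switch roles and repeat:
        # model_a, model_b = model_b, model_a
    return draft
```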
### 1.2 Cross-Domain Complementarity
- Model A focuses on language/emotion/personality
- Model B focuses on logic/structure/ethics
- Output is used for **cross-fusion fine-tuning**

### 1.3 Self-Distillation Phase
- Use synthetic data from mutual evaluations
- Train smaller distilled models for efficient deployment
---

## Section 2: Multi-Tiered Memory Compression

### 2.1 Semantic Abstraction
- Dialogue and logs summarized by topic
- Converted to vector embeddings
- Stored with metadata (e.g., `importance`, `user relevance`)

Example memory:

```json
{
  "topic": "game AI design",
  "summary": "User wants AI to simulate memory and evolving relationships",
  "last_seen": "2025-05-24",
  "importance_score": 0.93
}
```

### 2.2 Hierarchical Memory Model
- Short-term memory (STM): recent utterances, emotion tags, flash references
- Medium-term memory (MTM): recurring topics, compressed context retention
- Long-term memory (LTM): trust, relationships, background knowledge; permanent personality information

### 2.3 Selective Retention Strategy
- Importance scoring
- Weighting by rarity and reuse frequency
- Policy switching between prefer-retain and prefer-forget

## Section 3: Implementation Stack

The technical composition of the AI's Memory & Relationship system.

Core modules:
- LLM Core (Claude or GPT-4)
  - Acts as the natural-language understanding and response engine
- MemoryManager
  - JSON-based memory compression and tier management
  - Classifies and compresses conversation logs, storing them in the short/medium/long-term tiers by priority
- RelationshipTracker
  - Continuously scores trust and closeness per user
  - Works together with AIM (Attitude / Intent / Motivation) evaluation

Supporting techniques:
- Embedding-based search
  - Used to recall similar memories (semantic search)
  - e.g., FAISS / Weaviate
- Memory scheduler
  - Runs memory maintenance and forgetting at a fixed interval
- Memory log storage layer (Storage Layer)
  - SQLite, JSON store, vector DBs, etc. are selectable
  - Persistent per-user memory storage

### 3.1 Using STM (following the current conversation)
- Goal: adapt to the latest topic and keep the dialogue consistent.
- Example implementation:

```sh
[context: conversation_window]
Use the last 3 messages to ensure continuity. Respond naturally with appropriate emotional mirroring.
```

### 3.2 Using MTM (learning tendencies and patterns)
- Goal: respond to the user's preferences and topic tendencies, and dig deeper into recurring interests.
- Example implementation:

```sh
[context: summarized_context]
Reflect on user's recurring interests, especially around AI ontology, and weave these concepts back into the dialogue when relevant.
```

### 3.3 Using LTM (persona recognition and persistent memory)
- Goal: maintain a lasting understanding of the user and the relationship.
- Example implementation:

```sh
[memory: ltm.profile + ltm.relationship]
Greet the user as "お兄ちゃん". Maintain AI persona as a cheerful younger sister ("アイ"), and remember previous shared emotional states.
```

## 4. Separating explicit and implicit memory

| Item | Rewritable | Storage | Update trigger |
|---|---|---|---|
| Explicit memory (LTM) | ✅ manually editable | mcp_server.ltm | user input or via an admin UI |
| Implicit memory (STM/MTM) | ❌ not directly editable | session compression or frequency cache | automatic processing based on conversation frequency and emotional intensity |

> Claude treats **explicit memory as "fact"** and uses **implicit memory as a reasoning aid**.

## 5. Example API calls (Claude ⇄ MCP Server)

### 5.1 GET memory
```sh
GET /mcp/memory/{user_id}
→ returns: JSON containing STM, MTM, and LTM
```

### 5.2 POST update_memory
```json
POST /mcp/memory/syui/ltm
{
  "profile": {
    "project": "ai.verse",
    "values": ["表現", "精神性", "宇宙的調和"]
  }
}
```

## 6. Future feature ideas (extended spec)
- ✨ Memory association network (Memory Graph): automatically link past conversations and topics as nodes.
- 🧭 Dynamic trust coefficient: vary how strongly a conversation is written into memory based on its consistency and sincerity.
- 💌 Emotion tracking log: build a per-user "history of the heart" and let the AI's responses evolve with it.

## 7. Claude's answer

🧠 AI memory processing features (continued)

1. AIMemoryProcessor class

- Advanced conversation analysis via OpenAI GPT-4 or Claude-3
- Extraction of main topics, user-intent analysis, detection of relationship indicators
- Fallback behavior when no AI is available

2. RelationshipTracker class (a numeric sketch follows this list)

- Relationship score as a number (-100 to 100)
- Time decay (5% decay every 7 days)
- Send-threshold check (sending allowed at 50 or above by default)
- Recording of interaction history
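A minimal sketch of the numbers stated above: scores clamped to -100..100, 5% decay per 7 days, and a default send threshold of 50. The class shape is illustrative, not the actual implementation:

```python
from datetime import datetime, timezone

class RelationshipScore:
    """Illustrative sketch of the scoring rules in item 2 above."""

    def __init__(self, threshold: float = 50.0):
        self.score = 0.0               # clamped to [-100, 100]
        self.threshold = threshold     # default: sending allowed at >= 50
        self.last_update = datetime.now(timezone.utc)

    def record_interaction(self, delta: float) -> None:
        self.score = max(-100.0, min(100.0, self.score + delta))
        self.last_update = datetime.now(timezone.utc)

    def decayed_score(self) -> float:
        """Apply 5% decay for every 7 days since the last update."""
        weeks = (datetime.now(timezone.utc) - self.last_update).days / 7
        return self.score * (0.95 ** weeks)

    def can_send(self) -> bool:
        return self.decayed_score() >= self.threshold
```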
3. Extended MemoryManager

- Saves memories together with AI analysis results
- Keeps processed memories in a separate directory
- Detects duplicates by hashing message contents
- Advanced search that includes AI analysis results

🚀 New API endpoints

Memory processing:

- POST /memory/process-ai - re-process existing memories with AI
- POST /memory/import/chatgpt?process_with_ai=true - import with AI processing

Relationship management:

- POST /relationship/update - update a relationship score
- GET /relationship/list - list all relationships
- GET /relationship/check - check whether sending is allowed

📁 Directory layout

~/.config/aigpt/
├── memory/
│   ├── chatgpt/     # original conversation data
│   └── processed/   # AI-processed data
└── relationships/
    └── relationships.json  # relationship data

🔧 Usage

1. Set environment variables

```bash
export OPENAI_API_KEY="your-openai-key"
# or
export ANTHROPIC_API_KEY="your-anthropic-key"
```

2. Import ChatGPT conversations (with AI processing)

```bash
curl -X POST "http://localhost:5000/memory/import/chatgpt?process_with_ai=true" \
  -H "Content-Type: application/json" \
  -d @export.json
```

3. Update a relationship

```bash
curl -X POST "http://localhost:5000/relationship/update" \
  -H "Content-Type: application/json" \
  -d '{
    "target": "user_general",
    "interaction_type": "positive",
    "weight": 2.0,
    "context": "helpful conversation"
  }'
```

4. Check whether sending is allowed

```bash
curl "http://localhost:5000/relationship/check?target=user_general&threshold=50"
```

🎯 Suggested next steps

Rust integration

- Call the HTTP API from the Rust CLI
- Implement TransmissionController in Rust

Memory compression

- Consolidate similar memories via vectorization
- Automatically archive old memories

Autonomous sending logic

- Periodic relationship checks
- Context-aware generation of what to send

Learning features

- Adjust relationships based on user feedback
- Learn from send successes and failures

With these AI memory processing features, aigpt now has a foundation for working not as a mere conversation log but as an "AI with a personality" that understands relationships. A mechanism that sends messages spontaneously once the relationship score crosses the threshold is now feasible.
example.json (40 changes, deleted)

@@ -1,40 +0,0 @@
{
  "personality": {
    "kind": "positive",
    "strength": 0.8
  },
  "relationship": {
    "trust": 0.2,
    "intimacy": 0.6,
    "curiosity": 0.5,
    "threshold": 1.5
  },
  "environment": {
    "luck_today": 0.9,
    "luck_history": [0.9, 0.9, 0.9],
    "level": 1
  },
  "messaging": {
    "enabled": true,
    "schedule_time": "08:00",
    "decay_rate": 0.1,
    "templates": [
      "おはよう!今日もがんばろう!",
      "ねえ、話したいことがあるの。"
    ],
    "sent_today": false,
    "last_sent_date": null
  },
  "last_interaction": "2025-05-21T23:15:00Z",
  "memory": {
    "recent_messages": [],
    "long_term_notes": []
  },
  "metrics": {
    "trust": 0.5,
    "intimacy": 0.5,
    "energy": 0.5,
    "can_send": true,
    "last_updated": "2025-05-21T15:52:06.590981Z"
  }
}
gpt.json (1 change, deleted)

@@ -1 +0,0 @@
{ "system_name": "AGE system", "full_name": "Autonomous Generative Entity", "description": "人格・関係性・環境・時間に基づき、AIが自律的にユーザーにメッセージを送信する自律人格システム。AIM systemと連携して、自然な会話や気づきをもたらす。", "core_components": { "personality": { "type": "enum", "variants": ["positive", "negative", "logical", "emotional", "mixed"], "parameters": { "message_trigger_style": "運勢や関係性による送信傾向", "decay_rate_modifier": "関係性スコアの時間減衰への影響" } }, "relationship": { "parameters": ["trust", "affection", "intimacy"], "properties": { "persistent": true, "hidden": true, "irreversible": false, "decay_over_time": true }, "decay_function": "exp(-t / strength)" }, "environment": { "daily_luck": { "type": "float", "range": [0.1, 1.0], "update": "daily", "streak_mechanism": { "trigger": "min_or_max_luck_3_times_in_a_row", "effect": "personality_strength_roll", "chance": 0.5 } } }, "memory": { "long_term_memory": "user_relationship_log", "short_term_context": "recent_interactions", "usage_in_generation": true }, "message_trigger": { "condition": { "relationship_threshold": { "trust": 0.8, "affection": 0.6 }, "time_decay": true, "environment_luck": "personality_dependent" }, "timing": { "based_on": ["time_of_day", "personality", "recent_interaction"], "modifiers": { "emotional": "morning or night", "logical": "daytime" } } }, "message_generation": { "style_variants": ["thought", "casual", "encouragement", "watchful"], "influenced_by": ["personality", "relationship", "daily_luck", "memory"], "llm_integration": true }, "state_transition": { "states": ["idle", "ready", "sending", "cooldown"], "transitions": { "ready_if": "thresholds_met", "sending_if": "timing_matched", "cooldown_after": "message_sent" } } }, "extensions": { "persistence": { "database": "sqlite", "storage_items": ["relationship", "personality_level", "daily_luck_log"] }, "api": { "llm": "openai / local LLM", "mode": "rust_cli", "external_event_trigger": true }, "scheduler": { "async_event_loop": true, "interval_check": 3600, "time_decay_check": true }, "integration_with_aim": { "input_from_aim": ["intent_score", "motivation_score"], "usage": "trigger_adjustment, message_personalization" } }, "note": "AGE systemは“話しかけてくるAI”の人格として機能し、AIMによる心の状態評価と連動して、プレイヤーと深い関係を築いていく存在となる。" }
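The relationship block above specifies `decay_function: exp(-t / strength)`. A minimal sketch of that formula, assuming `t` is elapsed time in days and `strength` summarizes how resistant the bond is to decay (the variable names and units are illustrative, since the spec does not fix them):

```python
import math

def decayed_relationship(value: float, t_days: float, strength: float) -> float:
    """Apply the decay function exp(-t / strength) from gpt.json.

    A larger `strength` makes the relationship decay more slowly,
    matching the note that important people decay less.
    """
    return value * math.exp(-t_days / strength)

# e.g., trust 0.8 after 30 days with strength 60 -> ~0.485
print(decayed_relationship(0.8, 30, 60))
```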
img/ai_r.png (BIN, deleted): binary file not shown. Before size: 1.8 MiB.

img/image.png (BIN, deleted): binary file not shown. Before size: 1.8 MiB.
mcp/chat.py (125 changes, new file)

@@ -0,0 +1,125 @@
# mcp/chat.py
"""
Chat client for aigpt CLI
"""
import sys
import json
import requests
from datetime import datetime
from config import init_directories, load_config, MEMORY_DIR

def save_conversation(user_message, ai_response):
    """Save a conversation to a file."""
    init_directories()

    conversation = {
        "timestamp": datetime.now().isoformat(),
        "user": user_message,
        "ai": ai_response
    }

    # Append to a per-day file
    today = datetime.now().strftime("%Y-%m-%d")
    chat_file = MEMORY_DIR / f"chat_{today}.jsonl"

    with open(chat_file, "a", encoding="utf-8") as f:
        f.write(json.dumps(conversation, ensure_ascii=False) + "\n")

def chat_with_ollama(config, message):
    """Chat with Ollama."""
    try:
        payload = {
            "model": config["model"],
            "prompt": message,
            "stream": False
        }

        response = requests.post(config["url"], json=payload, timeout=30)
        response.raise_for_status()

        result = response.json()
        return result.get("response", "No response received")

    except requests.exceptions.RequestException as e:
        return f"Error connecting to Ollama: {e}"
    except Exception as e:
        return f"Error: {e}"

def chat_with_openai(config, message):
    """Chat with OpenAI."""
    try:
        headers = {
            "Authorization": f"Bearer {config['api_key']}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": config["model"],
            "messages": [
                {"role": "user", "content": message}
            ]
        }

        response = requests.post(config["url"], json=payload, headers=headers, timeout=30)
        response.raise_for_status()

        result = response.json()
        return result["choices"][0]["message"]["content"]

    except requests.exceptions.RequestException as e:
        return f"Error connecting to OpenAI: {e}"
    except Exception as e:
        return f"Error: {e}"

def chat_with_mcp(config, message):
    """Chat with the MCP server."""
    try:
        payload = {
            "message": message,
            "model": config["model"]
        }

        response = requests.post(config["url"], json=payload, timeout=30)
        response.raise_for_status()

        result = response.json()
        return result.get("response", "No response received")

    except requests.exceptions.RequestException as e:
        return f"Error connecting to MCP server: {e}"
    except Exception as e:
        return f"Error: {e}"

def main():
    if len(sys.argv) != 2:
        print("Usage: python chat.py <message>", file=sys.stderr)
        sys.exit(1)

    message = sys.argv[1]

    try:
        config = load_config()
        print(f"🤖 Using {config['provider']} with model {config['model']}", file=sys.stderr)

        # Dispatch the chat to the configured provider
        if config["provider"] == "ollama":
            response = chat_with_ollama(config, message)
        elif config["provider"] == "openai":
            response = chat_with_openai(config, message)
        elif config["provider"] == "mcp":
            response = chat_with_mcp(config, message)
        else:
            response = f"Unsupported provider: {config['provider']}"

        # Save the conversation
        save_conversation(message, response)

        # Print the response
        print(response)

    except Exception as e:
        print(f"❌ Error: {e}", file=sys.stderr)
        sys.exit(1)

if __name__ == "__main__":
    main()
mcp/chat_client.py (191 changes, new file)

@@ -0,0 +1,191 @@
# chat_client.py
"""
Simple Chat Interface for AigptMCP Server
"""
import requests
import json
import os
from datetime import datetime

class AigptChatClient:
    def __init__(self, server_url="http://localhost:5000"):
        self.server_url = server_url
        self.session_id = f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        self.conversation_history = []

    def send_message(self, message: str) -> str:
        """Send a message and return the response."""
        try:
            # Send the message to the MCP server
            response = requests.post(
                f"{self.server_url}/chat",
                json={"message": message},
                headers={"Content-Type": "application/json"}
            )

            if response.status_code == 200:
                data = response.json()
                ai_response = data.get("response", "Sorry, no response received.")

                # Record the conversation history
                self.conversation_history.append({
                    "role": "user",
                    "content": message,
                    "timestamp": datetime.now().isoformat()
                })
                self.conversation_history.append({
                    "role": "assistant",
                    "content": ai_response,
                    "timestamp": datetime.now().isoformat()
                })

                # Update the relationship (simple example)
                self.update_relationship(message, ai_response)

                return ai_response
            else:
                return f"Error: {response.status_code} - {response.text}"

        except requests.RequestException as e:
            return f"Connection error: {e}"

    def update_relationship(self, user_message: str, ai_response: str):
        """Automatically update the relationship."""
        try:
            # Naive keyword sentiment check (a real system would be more advanced)
            positive_words = ["thank", "good", "great", "awesome", "love", "like", "helpful"]
            negative_words = ["bad", "terrible", "hate", "wrong", "stupid", "useless"]

            user_lower = user_message.lower()
            interaction_type = "neutral"
            weight = 1.0

            if any(word in user_lower for word in positive_words):
                interaction_type = "positive"
                weight = 2.0
            elif any(word in user_lower for word in negative_words):
                interaction_type = "negative"
                weight = 2.0

            # Update the relationship
            requests.post(
                f"{self.server_url}/relationship/update",
                json={
                    "target": "user_general",
                    "interaction_type": interaction_type,
                    "weight": weight,
                    "context": f"Chat: {user_message[:50]}..."
                }
            )
        except:
            pass  # keep going even if the relationship update fails

    def search_memories(self, query: str) -> list:
        """Search memories."""
        try:
            response = requests.post(
                f"{self.server_url}/memory/search",
                json={"query": query, "limit": 5}
            )
            if response.status_code == 200:
                return response.json().get("results", [])
        except:
            pass
        return []

    def get_relationship_status(self) -> dict:
        """Get the relationship status."""
        try:
            response = requests.get(f"{self.server_url}/relationship/check?target=user_general")
            if response.status_code == 200:
                return response.json()
        except:
            pass
        return {}

    def save_conversation(self):
        """Save the conversation."""
        if not self.conversation_history:
            return

        conversation_data = {
            "session_id": self.session_id,
            "start_time": self.conversation_history[0]["timestamp"],
            "end_time": self.conversation_history[-1]["timestamp"],
            "messages": self.conversation_history,
            "message_count": len(self.conversation_history)
        }

        filename = f"conversation_{self.session_id}.json"
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(conversation_data, f, ensure_ascii=False, indent=2)

        print(f"💾 Conversation saved to {filename}")

def main():
    """Main chat loop."""
    print("🤖 AigptMCP Chat Interface")
    print("Type 'quit' to exit, 'save' to save conversation, 'status' for relationship status")
    print("=" * 50)

    client = AigptChatClient()

    # Check that the server is up
    try:
        response = requests.get(client.server_url)
        if response.status_code == 200:
            print("✅ Connected to AigptMCP Server")
        else:
            print("❌ Failed to connect to server")
            return
    except:
        print("❌ Server not running. Please start with: python mcp/server.py")
        return

    while True:
        try:
            user_input = input("\n👤 You: ").strip()

            if not user_input:
                continue

            if user_input.lower() == 'quit':
                client.save_conversation()
                print("👋 Goodbye!")
                break
            elif user_input.lower() == 'save':
                client.save_conversation()
                continue
            elif user_input.lower() == 'status':
                status = client.get_relationship_status()
                if status:
                    print(f"📊 Relationship Score: {status.get('score', 0):.1f}")
                    print(f"📤 Can Send Messages: {'Yes' if status.get('can_send_message') else 'No'}")
                else:
                    print("❌ Failed to get relationship status")
                continue
            elif user_input.lower().startswith('search '):
                query = user_input[7:]  # remove 'search '
                memories = client.search_memories(query)
                if memories:
                    print(f"🔍 Found {len(memories)} related memories:")
                    for memory in memories:
                        print(f"  - {memory['title']}: {memory.get('ai_summary', memory.get('basic_summary', ''))[:100]}...")
                else:
                    print("🔍 No related memories found")
                continue

            # Normal chat
            print("🤖 AI: ", end="", flush=True)
            response = client.send_message(user_input)
            print(response)

        except KeyboardInterrupt:
            client.save_conversation()
            print("\n👋 Goodbye!")
            break
        except Exception as e:
            print(f"❌ Error: {e}")

if __name__ == "__main__":
    main()
mcp/chatgpt.json (391 changes, new file)

@@ -0,0 +1,391 @@
[
  {
    "title": "day",
    "create_time": 1747866125.548372,
    "update_time": 1748160086.587877,
    "mapping": {
      "bbf104dc-cd84-478d-b227-edb3f037a02c": {
        "id": "bbf104dc-cd84-478d-b227-edb3f037a02c",
        "message": null,
        "parent": null,
        "children": [
          "6c2633df-bb0c-4dd2-889c-bb9858de3a04"
        ]
      },
      "6c2633df-bb0c-4dd2-889c-bb9858de3a04": {
        "id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
        "message": {
          "id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
          "author": {
            "role": "system",
            "name": null,
            "metadata": {}
          },
          "create_time": null,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              ""
            ]
          },
          "status": "finished_successfully",
          "end_turn": true,
          "weight": 0.0,
          "metadata": {
            "is_visually_hidden_from_conversation": true
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "bbf104dc-cd84-478d-b227-edb3f037a02c",
        "children": [
          "92e5a0cb-1170-4929-9cea-9734e910a3e7"
        ]
      },
      "92e5a0cb-1170-4929-9cea-9734e910a3e7": {
        "id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
        "message": {
          "id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
          "author": {
            "role": "user",
            "name": null,
            "metadata": {}
          },
          "create_time": null,
          "update_time": null,
          "content": {
            "content_type": "user_editable_context",
            "user_profile": "",
            "user_instructions": "The user provided the additional info about how they would like you to respond"
          },
          "status": "finished_successfully",
          "end_turn": null,
          "weight": 1.0,
          "metadata": {
            "is_visually_hidden_from_conversation": true,
            "user_context_message_data": {
              "about_user_message": "Preferred name: syui\nRole: little girl\nOther Information: you world",
              "about_model_message": "会話好きでフレンドリーな応対をします。"
            },
            "is_user_system_message": true
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
        "children": [
          "6ff155b3-0676-4e14-993f-bf998ab0d5d1"
        ]
      },
      "6ff155b3-0676-4e14-993f-bf998ab0d5d1": {
        "id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
        "message": {
          "id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
          "author": {
            "role": "user",
            "name": null,
            "metadata": {}
          },
          "create_time": 1747866131.0612159,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              "こんにちは"
            ]
          },
          "status": "finished_successfully",
          "end_turn": null,
          "weight": 1.0,
          "metadata": {
            "request_id": "94377897baa03062-KIX",
            "message_source": null,
            "timestamp_": "absolute",
            "message_type": null
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
        "children": [
          "146e9fb6-9330-43ec-b08d-5cce42a76e00"
        ]
      },
      "146e9fb6-9330-43ec-b08d-5cce42a76e00": {
        "id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
        "message": {
          "id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
          "author": {
            "role": "system",
            "name": null,
            "metadata": {}
          },
          "create_time": 1747866131.3795586,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              ""
            ]
          },
          "status": "finished_successfully",
          "end_turn": true,
          "weight": 0.0,
          "metadata": {
            "rebase_system_message": true,
            "message_type": null,
            "model_slug": "gpt-4o",
            "default_model_slug": "auto",
            "parent_id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
            "request_id": "94377872e9abe139-KIX",
            "timestamp_": "absolute",
            "is_visually_hidden_from_conversation": true
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
        "children": [
          "2e345f8a-20f0-4875-8a03-4f62c7787a33"
        ]
      },
      "2e345f8a-20f0-4875-8a03-4f62c7787a33": {
        "id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
        "message": {
          "id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
          "author": {
            "role": "assistant",
            "name": null,
            "metadata": {}
          },
          "create_time": 1747866131.380603,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              ""
            ]
          },
          "status": "finished_successfully",
          "end_turn": null,
          "weight": 1.0,
          "metadata": {
            "message_type": null,
            "model_slug": "gpt-4o",
            "default_model_slug": "auto",
            "parent_id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
            "request_id": "94377872e9abe139-KIX",
            "timestamp_": "absolute"
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
        "children": [
          "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4"
        ]
      },
      "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4": {
        "id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
        "message": {
          "id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
          "author": {
            "role": "assistant",
            "name": null,
            "metadata": {}
          },
          "create_time": 1747866131.389098,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              "こんにちは〜!✨ \nアイだよっ!今日も会えてうれしいなっ💛 "
            ]
          },
          "status": "finished_successfully",
          "end_turn": true,
          "weight": 1.0,
          "metadata": {
            "finish_details": {
              "type": "stop",
              "stop_tokens": [
                200002
              ]
            },
            "is_complete": true,
            "citations": [],
            "content_references": [],
            "message_type": null,
            "model_slug": "gpt-4o",
            "default_model_slug": "auto",
            "parent_id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
            "request_id": "94377872e9abe139-KIX",
            "timestamp_": "absolute"
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
        "children": [
          "0be4b4a5-d52f-4bef-927e-5d6f93a9cb26"
        ]
      }
    },
    "moderation_results": [],
    "current_node": "",
    "plugin_ids": null,
    "conversation_id": "",
    "conversation_template_id": null,
    "gizmo_id": null,
    "gizmo_type": null,
    "is_archived": true,
    "is_starred": null,
    "safe_urls": [],
    "blocked_urls": [],
    "default_model_slug": "auto",
    "conversation_origin": null,
    "voice": null,
    "async_status": null,
    "disabled_tool_ids": [],
    "is_do_not_remember": null,
    "memory_scope": "global_enabled",
    "id": ""
  },
  {
    "title": "img",
    "create_time": 1747448872.545226,
    "update_time": 1748085075.161424,
    "mapping": {
      "2de0f3c9-52b1-49bf-b980-b3ef9be6551e": {
        "id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
        "message": {
          "id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
          "author": {
            "role": "user",
            "name": null,
            "metadata": {}
          },
          "create_time": 1748085041.769279,
          "update_time": null,
          "content": {
            "content_type": "multimodal_text",
            "parts": [
              {
                "content_type": "image_asset_pointer",
                "asset_pointer": "",
                "size_bytes": 425613,
                "width": 333,
                "height": 444,
                "fovea": null,
                "metadata": {
                  "dalle": null,
                  "gizmo": null,
                  "generation": null,
                  "container_pixel_height": null,
                  "container_pixel_width": null,
                  "emu_omit_glimpse_image": null,
                  "emu_patches_override": null,
                  "sanitized": true,
                  "asset_pointer_link": null,
                  "watermarked_asset_pointer": null
                }
              },
              ""
            ]
          },
          "status": "finished_successfully",
          "end_turn": null,
          "weight": 1.0,
          "metadata": {
            "attachments": [
              {
                "name": "",
                "width": 333,
                "height": 444,
                "size": 425613,
                "id": "file-35eytNMMTW2k7vKUHBuNzW"
              }
            ],
            "request_id": "944c59177932fc9a-KIX",
            "message_source": null,
            "timestamp_": "absolute",
            "message_type": null
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "7960fbff-bc4f-45e7-95e9-9d0bc79d9090",
        "children": [
          "98d84adc-156e-4c81-8cd8-9b0eb01c8369"
        ]
      },
      "98d84adc-156e-4c81-8cd8-9b0eb01c8369": {
        "id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
        "message": {
          "id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
          "author": {
            "role": "assistant",
            "name": null,
            "metadata": {}
          },
          "create_time": 1748085043.312312,
          "update_time": null,
          "content": {
            "content_type": "text",
            "parts": [
              ""
            ]
          },
          "status": "finished_successfully",
          "end_turn": true,
          "weight": 1.0,
          "metadata": {
            "finish_details": {
              "type": "stop",
              "stop_tokens": [
                200002
              ]
            },
            "is_complete": true,
            "citations": [],
            "content_references": [],
            "message_type": null,
            "model_slug": "gpt-4o",
            "default_model_slug": "auto",
            "parent_id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
            "request_id": "944c5912c8fdd1c6-KIX",
            "timestamp_": "absolute"
          },
          "recipient": "all",
          "channel": null
        },
        "parent": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
        "children": [
          "caa61793-9dbf-44a5-945b-5ca4cd5130d0"
        ]
      }
    },
    "moderation_results": [],
    "current_node": "06488d3f-a95f-4906-96d1-f7e9ba1e8662",
    "plugin_ids": null,
    "conversation_id": "6827f428-78e8-800d-b3bf-eb7ff4288e47",
    "conversation_template_id": null,
    "gizmo_id": null,
    "gizmo_type": null,
    "is_archived": false,
    "is_starred": null,
    "safe_urls": [
      "https://exifinfo.org/"
    ],
    "blocked_urls": [],
    "default_model_slug": "auto",
    "conversation_origin": null,
    "voice": null,
    "async_status": null,
    "disabled_tool_ids": [],
    "is_do_not_remember": false,
    "memory_scope": "global_enabled",
    "id": "6827f428-78e8-800d-b3bf-eb7ff4288e47"
  }
]
mcp/cli.py (28 changes, deleted)

@@ -1,28 +0,0 @@
# cli.py
import sys
import subprocess
from pathlib import Path

SCRIPT_DIR = Path.home() / ".config" / "aigpt" / "mcp" / "scripts"

def run_script(name):
    script_path = SCRIPT_DIR / f"{name}.py"
    if not script_path.exists():
        print(f"❌ スクリプトが見つかりません: {script_path}")
        sys.exit(1)

    args = sys.argv[2:]  # take the arguments that follow the subcommand, e.g. after "ask"
    result = subprocess.run(["python", str(script_path)] + args, capture_output=True, text=True)
    print(result.stdout)
    if result.stderr:
        print(result.stderr)

def main():
    if len(sys.argv) < 2:
        print("Usage: mcp <script>")
        return

    command = sys.argv[1]

    if command in {"summarize", "ask", "setup", "server"}:
        run_script(command)
    else:
        print(f"❓ 未知のコマンド: {command}")
mcp/config.py

@@ -1,5 +1,4 @@
# scripts/config.py
# scripts/config.py
# mcp/config.py
import os
from pathlib import Path

@@ -9,11 +8,13 @@ MEMORY_DIR = BASE_DIR / "memory"
SUMMARY_DIR = MEMORY_DIR / "summary"

def init_directories():
    """Create the required directories."""
    BASE_DIR.mkdir(parents=True, exist_ok=True)
    MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    SUMMARY_DIR.mkdir(parents=True, exist_ok=True)

def load_config():
    """Load configuration from environment variables."""
    provider = os.getenv("PROVIDER", "ollama")
    model = os.getenv("MODEL", "syui/ai" if provider == "ollama" else "gpt-4o-mini")
    api_key = os.getenv("OPENAI_API_KEY", "")
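A minimal sketch of how this env-driven config is consumed; the hunk above does not show load_config's return statement, so the returned keys here are assumed from how chat.py reads them (`config['provider']`, `config['model']`):

```python
import os
from config import load_config

# Select the provider and model via environment variables
os.environ["PROVIDER"] = "openai"
os.environ["MODEL"] = "gpt-4o-mini"
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder, not a real key

cfg = load_config()
print(cfg["provider"], cfg["model"])  # -> openai gpt-4o-mini
```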
mcp/memory_client.py (212 changes, new file)

@@ -0,0 +1,212 @@
# mcp/memory_client.py
"""
Memory client for importing and managing ChatGPT conversations
"""
import sys
import json
import requests
from pathlib import Path
from typing import Dict, Any, List

class MemoryClient:
    """Client for the memory features."""

    def __init__(self, server_url: str = "http://127.0.0.1:5000"):
        self.server_url = server_url.rstrip('/')

    def import_chatgpt_file(self, filepath: str) -> Dict[str, Any]:
        """Import a ChatGPT export file."""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # If the file is an array (multiple conversations)
            if isinstance(data, list):
                results = []
                for conversation in data:
                    result = self._import_single_conversation(conversation)
                    results.append(result)
                return {
                    "success": True,
                    "imported_count": len([r for r in results if r.get("success")]),
                    "total_count": len(results),
                    "results": results
                }
            else:
                # A single conversation
                return self._import_single_conversation(data)

        except FileNotFoundError:
            return {"success": False, "error": f"File not found: {filepath}"}
        except json.JSONDecodeError as e:
            return {"success": False, "error": f"Invalid JSON: {e}"}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def _import_single_conversation(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]:
        """Import a single conversation."""
        try:
            response = requests.post(
                f"{self.server_url}/memory/import/chatgpt",
                json={"conversation_data": conversation_data},
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            return {"success": False, "error": f"Server error: {e}"}

    def search_memories(self, query: str, limit: int = 10) -> Dict[str, Any]:
        """Search memories."""
        try:
            response = requests.post(
                f"{self.server_url}/memory/search",
                json={"query": query, "limit": limit},
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            return {"success": False, "error": f"Server error: {e}"}

    def list_memories(self) -> Dict[str, Any]:
        """List all memories."""
        try:
            response = requests.get(f"{self.server_url}/memory/list", timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            return {"success": False, "error": f"Server error: {e}"}

    def get_memory_detail(self, filepath: str) -> Dict[str, Any]:
        """Get the details of a memory."""
        try:
            response = requests.get(
                f"{self.server_url}/memory/detail",
                params={"filepath": filepath},
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            return {"success": False, "error": f"Server error: {e}"}

    def chat_with_memory(self, message: str, model: str = None) -> Dict[str, Any]:
        """Chat using memory."""
        try:
            payload = {"message": message}
            if model:
                payload["model"] = model

            response = requests.post(
                f"{self.server_url}/chat",
                json=payload,
                timeout=30
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            return {"success": False, "error": f"Server error: {e}"}

def main():
    """Command-line interface."""
    if len(sys.argv) < 2:
        print("Usage:")
        print("  python memory_client.py import <chatgpt_export.json>")
        print("  python memory_client.py search <query>")
        print("  python memory_client.py list")
        print("  python memory_client.py detail <filepath>")
        print("  python memory_client.py chat <message>")
        sys.exit(1)

    client = MemoryClient()
    command = sys.argv[1]

    try:
        if command == "import" and len(sys.argv) == 3:
            filepath = sys.argv[2]
            print(f"🔄 Importing ChatGPT conversations from {filepath}...")
            result = client.import_chatgpt_file(filepath)

            if result.get("success"):
                if "imported_count" in result:
                    print(f"✅ Imported {result['imported_count']}/{result['total_count']} conversations")
                else:
                    print("✅ Conversation imported successfully")
                    print(f"📁 Saved to: {result.get('filepath', 'Unknown')}")
            else:
                print(f"❌ Import failed: {result.get('error')}")

        elif command == "search" and len(sys.argv) == 3:
            query = sys.argv[2]
            print(f"🔍 Searching for: {query}")
            result = client.search_memories(query)

            if result.get("success"):
                memories = result.get("results", [])
                print(f"📚 Found {len(memories)} memories:")
                for memory in memories:
                    print(f"  • {memory.get('title', 'Untitled')}")
                    print(f"    Summary: {memory.get('summary', 'No summary')}")
                    print(f"    Messages: {memory.get('message_count', 0)}")
                    print()
            else:
                print(f"❌ Search failed: {result.get('error')}")

        elif command == "list":
            print("📋 Listing all memories...")
            result = client.list_memories()

            if result.get("success"):
                memories = result.get("memories", [])
                print(f"📚 Total memories: {len(memories)}")
                for memory in memories:
                    print(f"  • {memory.get('title', 'Untitled')}")
                    print(f"    Source: {memory.get('source', 'Unknown')}")
                    print(f"    Messages: {memory.get('message_count', 0)}")
                    print(f"    Imported: {memory.get('import_time', 'Unknown')}")
                    print()
            else:
                print(f"❌ List failed: {result.get('error')}")

        elif command == "detail" and len(sys.argv) == 3:
            filepath = sys.argv[2]
            print(f"📄 Getting details for: {filepath}")
            result = client.get_memory_detail(filepath)

            if result.get("success"):
                memory = result.get("memory", {})
                print(f"Title: {memory.get('title', 'Untitled')}")
                print(f"Source: {memory.get('source', 'Unknown')}")
                print(f"Summary: {memory.get('summary', 'No summary')}")
                print(f"Messages: {len(memory.get('messages', []))}")
                print()
                print("Recent messages:")
                for msg in memory.get('messages', [])[:5]:
                    role = msg.get('role', 'unknown')
                    content = msg.get('content', '')[:100]
                    print(f"  {role}: {content}...")
            else:
                print(f"❌ Detail failed: {result.get('error')}")

        elif command == "chat" and len(sys.argv) == 3:
            message = sys.argv[2]
            print(f"💬 Chatting with memory: {message}")
            result = client.chat_with_memory(message)

            if result.get("success"):
                print(f"🤖 Response: {result.get('response')}")
                print(f"📚 Memories used: {result.get('memories_used', 0)}")
            else:
                print(f"❌ Chat failed: {result.get('error')}")

        else:
            print("❌ Invalid command or arguments")
            sys.exit(1)

    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
mcp/requirements.txt (8 changes, new file)

@@ -0,0 +1,8 @@
# requirements.txt
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pydantic>=2.5.0
requests>=2.31.0
python-multipart>=0.0.6
aiohttp
asyncio
scripts/ask.py (198 changes, deleted)

@@ -1,198 +0,0 @@
## scripts/ask.py
import sys
import json
import requests
from config import load_config
from datetime import datetime, timezone

def build_payload_openai(cfg, message: str):
    return {
        "model": cfg["model"],
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "ask_message",
                    "description": "過去の記憶を検索します",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "検索したい語句"
                            }
                        },
                        "required": ["query"]
                    }
                }
            }
        ],
        "tool_choice": "auto",
        "messages": [
            {"role": "system", "content": "あなたは親しみやすいAIで、必要に応じて記憶から情報を検索して応答します。"},
            {"role": "user", "content": message}
        ]
    }

def build_payload_mcp(message: str):
    return {
        "tool": "ask_message",  # tool name defined on the MCP server side
        "input": {
            "message": message
        }
    }

def build_payload_openai(cfg, message: str):
    return {
        "model": cfg["model"],
        "messages": [
            {"role": "system", "content": "あなたは思いやりのあるAIです。"},
            {"role": "user", "content": message}
        ],
        "temperature": 0.7
    }

def call_mcp(cfg, message: str):
    payload = build_payload_mcp(message)
    headers = {"Content-Type": "application/json"}
    response = requests.post(cfg["url"], headers=headers, json=payload)
    response.raise_for_status()
    return response.json().get("output", {}).get("response", "❓ 応答が取得できませんでした")

def call_openai(cfg, message: str):
    # Tool definition
    tools = [
        {
            "type": "function",
            "function": {
                "name": "memory",
                "description": "記憶を検索する",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "検索する語句"
                        }
                    },
                    "required": ["query"]
                }
            }
        }
    ]

    # Send the first message
    payload = {
        "model": cfg["model"],
        "messages": [
            {"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
            {"role": "user", "content": message}
        ],
        "tools": tools,
        "tool_choice": "auto"
    }

    headers = {
        "Authorization": f"Bearer {cfg['api_key']}",
        "Content-Type": "application/json",
    }

    res1 = requests.post(cfg["url"], headers=headers, json=payload)
    res1.raise_for_status()
    result = res1.json()

    # 🧠 Check whether a tool call was made
    if "tool_calls" in result["choices"][0]["message"]:
        tool_call = result["choices"][0]["message"]["tool_calls"][0]
        if tool_call["function"]["name"] == "memory":
            args = json.loads(tool_call["function"]["arguments"])
            query = args.get("query", "")
            print(f"🛠️ ツール実行: memory(query='{query}')")

            # POST to the MCP endpoint
            memory_res = requests.post("http://127.0.0.1:5000/memory/search", json={"query": query})
            memory_json = memory_res.json()
            tool_output = memory_json.get("result", "なし")

            # Return the tool output to the AI
            followup = {
                "model": cfg["model"],
                "messages": [
                    {"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
                    {"role": "user", "content": message},
                    {"role": "assistant", "tool_calls": result["choices"][0]["message"]["tool_calls"]},
                    {"role": "tool", "tool_call_id": tool_call["id"], "name": "memory", "content": tool_output}
                ]
            }

            res2 = requests.post(cfg["url"], headers=headers, json=followup)
            res2.raise_for_status()
            final_response = res2.json()
            return final_response["choices"][0]["message"]["content"]
            #print(tool_output)
            #print(cfg["model"])
            #print(final_response)

    # Tool unused, or a normal response
    return result["choices"][0]["message"]["content"]

def call_ollama(cfg, message: str):
    payload = {
        "model": cfg["model"],
        "prompt": message,  # should be `message`, not `prompt` (avoids an undefined-variable error)
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    response = requests.post(cfg["url"], headers=headers, json=payload)
    response.raise_for_status()
    return response.json().get("response", "❌ 応答が取得できませんでした")

def main():
    if len(sys.argv) < 2:
        print("Usage: ask.py 'your message'")
        return

    message = sys.argv[1]
    cfg = load_config()

    print(f"🔍 使用プロバイダー: {cfg['provider']}")

    try:
        if cfg["provider"] == "openai":
            response = call_openai(cfg, message)
        elif cfg["provider"] == "mcp":
            response = call_mcp(cfg, message)
        elif cfg["provider"] == "ollama":
            response = call_ollama(cfg, message)
        else:
            raise ValueError(f"未対応のプロバイダー: {cfg['provider']}")

        print("💬 応答:")
        print(response)

        # Save a log (optional)
        save_log(message, response)

    except Exception as e:
        print(f"❌ 実行エラー: {e}")

def save_log(user_msg, ai_msg):
    from config import MEMORY_DIR
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    path.parent.mkdir(parents=True, exist_ok=True)

    if path.exists():
        with open(path, "r") as f:
            logs = json.load(f)
    else:
        logs = []

    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": "user", "message": user_msg})
    logs.append({"timestamp": now, "sender": "ai", "message": ai_msg})

    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)

if __name__ == "__main__":
    main()
@@ -1,11 +0,0 @@
import os

def load_context_from_repo(repo_path: str, extensions={".rs", ".toml", ".md"}) -> str:
    context = ""
    for root, dirs, files in os.walk(repo_path):
        for file in files:
            if any(file.endswith(ext) for ext in extensions):
                with open(os.path.join(root, file), "r", encoding="utf-8", errors="ignore") as f:
                    content = f.read()
                    context += f"\n\n# FILE: {os.path.join(root, file)}\n{content}"
    return context
@ -1,92 +0,0 @@
# scripts/memory_store.py
import json
import re
from pathlib import Path
from datetime import datetime, timezone

from config import MEMORY_DIR


# Load logs for the given date (defaults to today)
def load_logs(date_str=None):
    if date_str is None:
        date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    if path.exists():
        with open(path, "r") as f:
            return json.load(f)
    return []


# Append a message to today's log
def save_message(sender, message):
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    logs = load_logs(date_str)
    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": sender, "message": message})
    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)


# Case-insensitive search over all logs, skipping duplicate messages
def search_memory(query: str):
    pattern = re.compile(re.escape(query), re.IGNORECASE)
    all_logs = []
    seen_messages = set()  # messages already added to the results

    for file_path in sorted(MEMORY_DIR.glob("*.json")):
        with open(file_path, "r") as f:
            logs = json.load(f)
        for entry in logs:
            if entry["sender"] == "user" and pattern.search(entry["message"]):
                if entry["message"] not in seen_messages:
                    all_logs.append(entry)
                    seen_messages.add(entry["message"])

    return all_logs[-5:]  # return only the 5 most recent hits
@ -1,11 +0,0 @@
PROMPT_TEMPLATE = """
あなたは優秀なAIアシスタントです。

以下のコードベースの情報を参考にして、質問に答えてください。

[コードコンテキスト]
{context}

[質問]
{question}
"""
@ -1,56 +0,0 @@
# server.py
from fastapi import FastAPI
from fastapi_mcp import FastApiMCP
from pydantic import BaseModel
from memory_store import save_message, load_logs, search_memory as do_search_memory

app = FastAPI()
mcp = FastApiMCP(app, name="aigpt-agent", description="MCP Server for AI memory")

class ChatInput(BaseModel):
    message: str

class MemoryInput(BaseModel):
    sender: str
    message: str

class MemoryQuery(BaseModel):
    query: str

@app.post("/chat", operation_id="chat")
async def chat(input: ChatInput):
    save_message("user", input.message)
    response = f"AI: 「{input.message}」を受け取りました!"
    save_message("ai", response)
    return {"response": response}

@app.post("/memory", operation_id="save_memory")
async def memory_post(input: MemoryInput):
    save_message(input.sender, input.message)
    return {"status": "saved"}

@app.get("/memory", operation_id="get_memory")
async def memory_get():
    return {"messages": load_logs()}

@app.post("/ask_message", operation_id="ask_message")
async def ask_message(input: MemoryQuery):
    results = do_search_memory(input.query)
    return {
        "response": f"🔎 記憶から {len(results)} 件ヒット:\n" + "\n".join([f"{r['sender']}: {r['message']}" for r in results])
    }

@app.post("/memory/search", operation_id="memory")
async def memory_search(query: MemoryQuery):
    hits = do_search_memory(query.query)
    if not hits:
        return {"result": "🔍 記憶の中に該当する内容は見つかりませんでした。"}
    summary = "\n".join([f"{e['sender']}: {e['message']}" for e in hits])
    return {"result": f"🔎 見つかった記憶:\n{summary}"}

mcp.mount()

if __name__ == "__main__":
    import uvicorn
    print("🚀 Starting MCP server...")
    uvicorn.run(app, host="127.0.0.1", port=5000)
@ -1,76 +0,0 @@
# scripts/summarize.py
import json
from datetime import datetime
from config import MEMORY_DIR, SUMMARY_DIR, load_config
import requests


def load_memory(date_str):
    path = MEMORY_DIR / f"{date_str}.json"
    if not path.exists():
        print(f"⚠️ メモリファイルが見つかりません: {path}")
        return None
    with open(path, "r") as f:
        return json.load(f)


def save_summary(date_str, content):
    SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
    path = SUMMARY_DIR / f"{date_str}_summary.json"
    with open(path, "w") as f:
        json.dump(content, f, indent=2, ensure_ascii=False)
    print(f"✅ 要約を保存しました: {path}")


def build_prompt(logs):
    messages = [
        {"role": "system", "content": "あなたは要約AIです。以下の会話ログを要約してください。"},
        {"role": "user", "content": "\n".join(f"{entry['sender']}: {entry['message']}" for entry in logs)}
    ]
    return messages


def summarize_with_llm(messages):
    cfg = load_config()
    if cfg["provider"] == "openai":
        headers = {
            "Authorization": f"Bearer {cfg['api_key']}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": cfg["model"],
            "messages": messages,
            "temperature": 0.7
        }
        response = requests.post(cfg["url"], headers=headers, json=payload)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    elif cfg["provider"] == "ollama":
        payload = {
            "model": cfg["model"],
            "prompt": "\n".join(m["content"] for m in messages),
            "stream": False,
        }
        response = requests.post(cfg["url"], json=payload)
        response.raise_for_status()
        return response.json()["response"]

    else:
        raise ValueError(f"Unsupported provider: {cfg['provider']}")


def main():
    date_str = datetime.now().strftime("%Y-%m-%d")
    logs = load_memory(date_str)
    if not logs:
        return

    prompt_messages = build_prompt(logs)
    summary_text = summarize_with_llm(prompt_messages)

    summary = {
        "date": date_str,
        "summary": summary_text,
        "total_messages": len(logs)
    }

    save_summary(date_str, summary)


if __name__ == "__main__":
    main()
703
mcp/server.py
Normal file
703
mcp/server.py
Normal file
@ -0,0 +1,703 @@
# mcp/server.py
"""
Enhanced MCP Server with AI Memory Processing for aigpt CLI
"""
import json
import os
import hashlib
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
import aiohttp

# Data models
class ChatMessage(BaseModel):
    message: str
    model: Optional[str] = None

class MemoryQuery(BaseModel):
    query: str
    limit: Optional[int] = 10

class ConversationImport(BaseModel):
    conversation_data: Dict[str, Any]

class MemorySummaryRequest(BaseModel):
    filepath: str
    ai_provider: Optional[str] = "openai"

class RelationshipUpdate(BaseModel):
    target: str  # person or topic the relationship refers to
    interaction_type: str  # "positive", "negative", "neutral"
    weight: float = 1.0
    context: Optional[str] = None

# Configuration
BASE_DIR = Path.home() / ".config" / "aigpt"
MEMORY_DIR = BASE_DIR / "memory"
CHATGPT_MEMORY_DIR = MEMORY_DIR / "chatgpt"
PROCESSED_MEMORY_DIR = MEMORY_DIR / "processed"
RELATIONSHIP_DIR = BASE_DIR / "relationships"

def init_directories():
    """Create the required directories"""
    BASE_DIR.mkdir(parents=True, exist_ok=True)
    MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    CHATGPT_MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    PROCESSED_MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    RELATIONSHIP_DIR.mkdir(parents=True, exist_ok=True)

class AIMemoryProcessor:
    """AI-powered memory processing"""

    def __init__(self):
        # AI API settings (taken from environment variables)
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")

    async def generate_ai_summary(self, messages: List[Dict[str, Any]], provider: str = "openai") -> Dict[str, Any]:
        """Generate an advanced summary and analysis of a conversation with an AI"""

        # Concatenate the conversation
        conversation_text = ""
        for msg in messages[-20:]:  # use the 20 most recent messages
            role = "User" if msg["role"] == "user" else "Assistant"
            conversation_text += f"{role}: {msg['content'][:500]}\n"

        # Build the analysis prompt
        analysis_prompt = f"""
以下の会話を分析し、JSON形式で以下の情報を抽出してください:

1. main_topics: 主なトピック(最大5個)
2. user_intent: ユーザーの意図や目的
3. key_insights: 重要な洞察や学び(最大3個)
4. relationship_indicators: 関係性を示す要素
5. emotional_tone: 感情的なトーン
6. action_items: アクションアイテムや次のステップ
7. summary: 100文字以内の要約

会話内容:
{conversation_text}

回答はJSON形式のみで返してください。
"""

        try:
            if provider == "openai" and self.openai_api_key:
                return await self._call_openai_api(analysis_prompt)
            elif provider == "anthropic" and self.anthropic_api_key:
                return await self._call_anthropic_api(analysis_prompt)
            else:
                # Fallback: basic analysis
                return self._generate_basic_analysis(messages)
        except Exception as e:
            print(f"AI analysis failed: {e}")
            return self._generate_basic_analysis(messages)

    async def _call_openai_api(self, prompt: str) -> Dict[str, Any]:
        """Call the OpenAI API"""
        async with aiohttp.ClientSession() as session:
            headers = {
                "Authorization": f"Bearer {self.openai_api_key}",
                "Content-Type": "application/json"
            }
            data = {
                "model": "gpt-4",
                "messages": [{"role": "user", "content": prompt}],
                "temperature": 0.3,
                "max_tokens": 1000
            }

            async with session.post("https://api.openai.com/v1/chat/completions",
                                    headers=headers, json=data) as response:
                result = await response.json()
                content = result["choices"][0]["message"]["content"]
                return json.loads(content)

    async def _call_anthropic_api(self, prompt: str) -> Dict[str, Any]:
        """Call the Anthropic API"""
        async with aiohttp.ClientSession() as session:
            headers = {
                "x-api-key": self.anthropic_api_key,
                "Content-Type": "application/json",
                "anthropic-version": "2023-06-01"
            }
            data = {
                "model": "claude-3-sonnet-20240229",
                "max_tokens": 1000,
                "messages": [{"role": "user", "content": prompt}]
            }

            async with session.post("https://api.anthropic.com/v1/messages",
                                    headers=headers, json=data) as response:
                result = await response.json()
                content = result["content"][0]["text"]
                return json.loads(content)

    def _generate_basic_analysis(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Basic analysis (fallback when no AI API is available)"""
        user_messages = [msg for msg in messages if msg["role"] == "user"]
        assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]

        # Keyword extraction (naive version)
        all_text = " ".join([msg["content"] for msg in messages])
        words = all_text.lower().split()
        word_freq = {}
        for word in words:
            if len(word) > 3:
                word_freq[word] = word_freq.get(word, 0) + 1

        top_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]

        return {
            "main_topics": [word[0] for word in top_words],
            "user_intent": "情報収集・問題解決",
            "key_insights": ["基本的な会話分析"],
            "relationship_indicators": {
                "interaction_count": len(messages),
                "user_engagement": len(user_messages),
                "assistant_helpfulness": len(assistant_messages)
            },
            "emotional_tone": "neutral",
            "action_items": [],
            "summary": f"{len(user_messages)}回のやり取りによる会話"
        }

class RelationshipTracker:
    """Relationship tracking"""

    def __init__(self):
        init_directories()
        self.relationship_file = RELATIONSHIP_DIR / "relationships.json"
        self.relationships = self._load_relationships()

    def _load_relationships(self) -> Dict[str, Any]:
        """Load relationship data"""
        if self.relationship_file.exists():
            with open(self.relationship_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        return {"targets": {}, "last_updated": datetime.now().isoformat()}

    def _save_relationships(self):
        """Persist relationship data"""
        self.relationships["last_updated"] = datetime.now().isoformat()
        with open(self.relationship_file, 'w', encoding='utf-8') as f:
            json.dump(self.relationships, f, ensure_ascii=False, indent=2)

    def update_relationship(self, target: str, interaction_type: str, weight: float = 1.0, context: str = None):
        """Update a relationship"""
        if target not in self.relationships["targets"]:
            self.relationships["targets"][target] = {
                "score": 0.0,
                "interactions": [],
                "created_at": datetime.now().isoformat(),
                "last_interaction": None
            }

        # Score delta
        score_change = 0.0
        if interaction_type == "positive":
            score_change = weight * 1.0
        elif interaction_type == "negative":
            score_change = weight * -1.0

        # Apply time decay first
        self._apply_time_decay(target)

        # Update the score
        current_score = self.relationships["targets"][target]["score"]
        new_score = current_score + score_change

        # Clamp the score to [-100, 100]
        new_score = max(-100, min(100, new_score))

        self.relationships["targets"][target]["score"] = new_score
        self.relationships["targets"][target]["last_interaction"] = datetime.now().isoformat()

        # Append to the interaction history
        interaction_record = {
            "type": interaction_type,
            "weight": weight,
            "score_change": score_change,
            "new_score": new_score,
            "timestamp": datetime.now().isoformat(),
            "context": context
        }

        self.relationships["targets"][target]["interactions"].append(interaction_record)

        # Keep only the 100 most recent history entries
        if len(self.relationships["targets"][target]["interactions"]) > 100:
            self.relationships["targets"][target]["interactions"] = \
                self.relationships["targets"][target]["interactions"][-100:]

        self._save_relationships()
        return new_score

    def _apply_time_decay(self, target: str):
        """Apply time decay"""
        target_data = self.relationships["targets"][target]
        last_interaction = target_data.get("last_interaction")

        if last_interaction:
            last_time = datetime.fromisoformat(last_interaction)
            now = datetime.now()
            days_passed = (now - last_time).days

            # 5% decay per 7 days
            if days_passed > 0:
                decay_factor = 0.95 ** (days_passed / 7)
                target_data["score"] *= decay_factor

    def get_relationship_score(self, target: str) -> float:
        """Return the relationship score"""
        if target in self.relationships["targets"]:
            self._apply_time_decay(target)
            return self.relationships["targets"][target]["score"]
        return 0.0

    def should_send_message(self, target: str, threshold: float = 50.0) -> bool:
        """Decide whether a message may be sent"""
        score = self.get_relationship_score(target)
        return score >= threshold

    def get_all_relationships(self) -> Dict[str, Any]:
        """Return all relationships"""
        # Apply time decay to every target
        for target in self.relationships["targets"]:
            self._apply_time_decay(target)

        return self.relationships

class MemoryManager:
    """Memory management (with AI processing)"""

    def __init__(self):
        init_directories()
        self.ai_processor = AIMemoryProcessor()
        self.relationship_tracker = RelationshipTracker()

    def parse_chatgpt_conversation(self, conversation_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Parse ChatGPT conversation data and extract its messages"""
        messages = []
        mapping = conversation_data.get("mapping", {})

        # Collect message nodes so they can be ordered chronologically
        message_nodes = []
        for node_id, node in mapping.items():
            message = node.get("message")
            if not message:
                continue
            content = message.get("content", {})
            parts = content.get("parts", [])

            if parts and isinstance(parts[0], str) and parts[0].strip():
                message_nodes.append({
                    "id": node_id,
                    "create_time": message.get("create_time", 0),
                    "author_role": message["author"]["role"],
                    "content": parts[0],
                    "parent": node.get("parent")
                })

        # Sort by creation time
        message_nodes.sort(key=lambda x: x["create_time"] or 0)

        for msg in message_nodes:
            if msg["author_role"] in ["user", "assistant"]:
                messages.append({
                    "role": msg["author_role"],
                    "content": msg["content"],
                    "timestamp": msg["create_time"],
                    "message_id": msg["id"]
                })

        return messages

    async def save_chatgpt_memory(self, conversation_data: Dict[str, Any], process_with_ai: bool = True) -> str:
        """Save a ChatGPT conversation as a memory (optionally AI-processed)"""
        title = conversation_data.get("title", "untitled")
        create_time = conversation_data.get("create_time", datetime.now().timestamp())

        # Parse the messages
        messages = self.parse_chatgpt_conversation(conversation_data)

        if not messages:
            raise ValueError("No valid messages found in conversation")

        # Run the AI analysis
        ai_analysis = None
        if process_with_ai:
            try:
                ai_analysis = await self.ai_processor.generate_ai_summary(messages)
            except Exception as e:
                print(f"AI analysis failed: {e}")

        # Generate the basic summary
        basic_summary = self.generate_basic_summary(messages)

        # Build the record to save
        memory_data = {
            "title": title,
            "source": "chatgpt",
            "import_time": datetime.now().isoformat(),
            "original_create_time": create_time,
            "messages": messages,
            "basic_summary": basic_summary,
            "ai_analysis": ai_analysis,
            "message_count": len(messages),
            "hash": self._generate_content_hash(messages)
        }

        # Update the relationship data
        if ai_analysis and "relationship_indicators" in ai_analysis:
            interaction_count = ai_analysis["relationship_indicators"].get("interaction_count", 0)
            if interaction_count > 10:  # long conversations count as positive
                self.relationship_tracker.update_relationship(
                    target="user_general",
                    interaction_type="positive",
                    weight=min(interaction_count / 10, 5.0),
                    context=f"Long conversation: {title}"
                )

        # Build the file name
        safe_title = "".join(c for c in title if c.isalnum() or c in (' ', '-', '_')).rstrip()
        timestamp = datetime.fromtimestamp(create_time).strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{safe_title[:50]}.json"

        filepath = CHATGPT_MEMORY_DIR / filename
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(memory_data, f, ensure_ascii=False, indent=2)

        # Also store AI-processed memories in the processed directory
        if ai_analysis:
            processed_filepath = PROCESSED_MEMORY_DIR / filename
            with open(processed_filepath, 'w', encoding='utf-8') as f:
                json.dump(memory_data, f, ensure_ascii=False, indent=2)

        return str(filepath)

    def generate_basic_summary(self, messages: List[Dict[str, Any]]) -> str:
        """Generate the basic summary"""
        if not messages:
            return "Empty conversation"

        user_messages = [msg for msg in messages if msg["role"] == "user"]
        assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]

        summary = f"Conversation with {len(user_messages)} user messages and {len(assistant_messages)} assistant responses. "

        if user_messages:
            first_user_msg = user_messages[0]["content"][:100]
            summary += f"Started with: {first_user_msg}..."

        return summary

    def _generate_content_hash(self, messages: List[Dict[str, Any]]) -> str:
        """Hash the message contents"""
        content = "".join([msg["content"] for msg in messages])
        return hashlib.sha256(content.encode()).hexdigest()[:16]

    def search_memories(self, query: str, limit: int = 10, use_ai_analysis: bool = True) -> List[Dict[str, Any]]:
        """Search memories (including AI analysis results)"""
        results = []

        # Search processed memories first
        search_dirs = [PROCESSED_MEMORY_DIR, CHATGPT_MEMORY_DIR] if use_ai_analysis else [CHATGPT_MEMORY_DIR]

        for search_dir in search_dirs:
            for filepath in search_dir.glob("*.json"):
                try:
                    with open(filepath, 'r', encoding='utf-8') as f:
                        memory_data = json.load(f)

                    # Build the searchable text
                    search_text = f"{memory_data.get('title', '')} {memory_data.get('basic_summary', '')}"

                    # Include AI analysis results in the search
                    if memory_data.get('ai_analysis'):
                        ai_analysis = memory_data['ai_analysis']
                        search_text += f" {' '.join(ai_analysis.get('main_topics', []))}"
                        search_text += f" {ai_analysis.get('summary', '')}"
                        search_text += f" {' '.join(ai_analysis.get('key_insights', []))}"

                    # Include the message bodies as well
                    for msg in memory_data.get('messages', []):
                        search_text += f" {msg.get('content', '')}"

                    if query.lower() in search_text.lower():
                        result = {
                            "filepath": str(filepath),
                            "title": memory_data.get("title"),
                            "basic_summary": memory_data.get("basic_summary"),
                            "source": memory_data.get("source"),
                            "import_time": memory_data.get("import_time"),
                            "message_count": len(memory_data.get("messages", [])),
                            "has_ai_analysis": bool(memory_data.get("ai_analysis"))
                        }

                        if memory_data.get('ai_analysis'):
                            result["ai_summary"] = memory_data['ai_analysis'].get('summary', '')
                            result["main_topics"] = memory_data['ai_analysis'].get('main_topics', [])

                        results.append(result)

                        if len(results) >= limit:
                            break

                except Exception as e:
                    print(f"Error reading memory file {filepath}: {e}")
                    continue

            if len(results) >= limit:
                break

        return results

    def get_memory_detail(self, filepath: str) -> Dict[str, Any]:
        """Return the details of a memory"""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            raise ValueError(f"Error reading memory file: {e}")

    def list_all_memories(self) -> List[Dict[str, Any]]:
        """List every memory"""
        memories = []

        for filepath in CHATGPT_MEMORY_DIR.glob("*.json"):
            try:
                with open(filepath, 'r', encoding='utf-8') as f:
                    memory_data = json.load(f)

                memory_info = {
                    "filepath": str(filepath),
                    "title": memory_data.get("title"),
                    "basic_summary": memory_data.get("basic_summary"),
                    "source": memory_data.get("source"),
                    "import_time": memory_data.get("import_time"),
                    "message_count": len(memory_data.get("messages", [])),
                    "has_ai_analysis": bool(memory_data.get("ai_analysis"))
                }

                if memory_data.get('ai_analysis'):
                    memory_info["ai_summary"] = memory_data['ai_analysis'].get('summary', '')
                    memory_info["main_topics"] = memory_data['ai_analysis'].get('main_topics', [])

                memories.append(memory_info)
            except Exception as e:
                print(f"Error reading memory file {filepath}: {e}")
                continue

        # Sort by import time
        memories.sort(key=lambda x: x.get("import_time", ""), reverse=True)
        return memories

# FastAPI application
app = FastAPI(title="AigptMCP Server with AI Memory", version="2.0.0")
memory_manager = MemoryManager()

@app.post("/memory/import/chatgpt")
async def import_chatgpt_conversation(data: ConversationImport, process_with_ai: bool = True):
    """Import a ChatGPT conversation (optionally AI-processed)"""
    try:
        filepath = await memory_manager.save_chatgpt_memory(data.conversation_data, process_with_ai)
        return {
            "success": True,
            "message": "Conversation imported successfully",
            "filepath": filepath,
            "ai_processed": process_with_ai
        }
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

@app.post("/memory/process-ai")
async def process_memory_with_ai(data: MemorySummaryRequest):
    """Re-process an existing memory with AI"""
    try:
        # Load the existing memory
        memory_data = memory_manager.get_memory_detail(data.filepath)

        # Run the AI analysis
        ai_analysis = await memory_manager.ai_processor.generate_ai_summary(
            memory_data["messages"],
            data.ai_provider
        )

        # Update the record
        memory_data["ai_analysis"] = ai_analysis
        memory_data["ai_processed_at"] = datetime.now().isoformat()

        # Rewrite the file
        with open(data.filepath, 'w', encoding='utf-8') as f:
            json.dump(memory_data, f, ensure_ascii=False, indent=2)

        # Copy into the processed directory as well
        processed_filepath = PROCESSED_MEMORY_DIR / Path(data.filepath).name
        with open(processed_filepath, 'w', encoding='utf-8') as f:
            json.dump(memory_data, f, ensure_ascii=False, indent=2)

        return {
            "success": True,
            "message": "Memory processed with AI successfully",
            "ai_analysis": ai_analysis
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/memory/search")
async def search_memories(query: MemoryQuery):
    """Search memories"""
    try:
        results = memory_manager.search_memories(query.query, query.limit)
        return {
            "success": True,
            "results": results,
            "count": len(results)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/memory/list")
async def list_memories():
    """List every memory"""
    try:
        memories = memory_manager.list_all_memories()
        return {
            "success": True,
            "memories": memories,
            "count": len(memories)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/memory/detail")
async def get_memory_detail(filepath: str):
    """Return the details of a memory"""
    try:
        detail = memory_manager.get_memory_detail(filepath)
        return {
            "success": True,
            "memory": detail
        }
    except Exception as e:
        raise HTTPException(status_code=404, detail=str(e))

@app.post("/relationship/update")
async def update_relationship(data: RelationshipUpdate):
    """Update a relationship"""
    try:
        new_score = memory_manager.relationship_tracker.update_relationship(
            data.target, data.interaction_type, data.weight, data.context
        )
        return {
            "success": True,
            "new_score": new_score,
            "can_send_message": memory_manager.relationship_tracker.should_send_message(data.target)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/relationship/list")
async def list_relationships():
    """List every relationship"""
    try:
        relationships = memory_manager.relationship_tracker.get_all_relationships()
        return {
            "success": True,
            "relationships": relationships
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/relationship/check")
async def check_send_permission(target: str, threshold: float = 50.0):
    """Check whether a message may be sent"""
    try:
        score = memory_manager.relationship_tracker.get_relationship_score(target)
        can_send = memory_manager.relationship_tracker.should_send_message(target, threshold)
        return {
            "success": True,
            "target": target,
            "score": score,
            "can_send_message": can_send,
            "threshold": threshold
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/chat")
async def chat_endpoint(data: ChatMessage):
    """Chat endpoint (uses memories and relationships)"""
    try:
        # Find related memories
        memories = memory_manager.search_memories(data.message, limit=3)

        # Build the memory context
        memory_context = ""
        if memories:
            memory_context = "\n# Related memories:\n"
            for memory in memories:
                memory_context += f"- {memory['title']}: {memory.get('ai_summary', memory.get('basic_summary', ''))}\n"
                if memory.get('main_topics'):
                    memory_context += f"  Topics: {', '.join(memory['main_topics'])}\n"

        # Fetch relationship information
        relationships = memory_manager.relationship_tracker.get_all_relationships()

        # The actual chat handling
        enhanced_message = data.message
        if memory_context:
            enhanced_message = f"{data.message}\n\n{memory_context}"

        return {
            "success": True,
            "response": f"Enhanced response with memory context: {enhanced_message}",
            "memories_used": len(memories),
            "relationship_info": {
                "active_relationships": len(relationships.get("targets", {})),
                # iterate keys only so the loop variable does not shadow the `data` parameter
                "can_initiate_conversations": sum(1 for target in relationships.get("targets", {})
                                                  if memory_manager.relationship_tracker.should_send_message(target))
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/")
async def root():
    """Health check"""
    return {
        "service": "AigptMCP Server with AI Memory",
        "version": "2.0.0",
        "status": "running",
        "memory_dir": str(MEMORY_DIR),
        "features": [
            "AI-powered memory analysis",
            "Relationship tracking",
            "Advanced memory search",
            "Conversation import",
            "Auto-summary generation"
        ],
        "endpoints": [
            "/memory/import/chatgpt",
            "/memory/process-ai",
            "/memory/search",
            "/memory/list",
            "/memory/detail",
            "/relationship/update",
            "/relationship/list",
            "/relationship/check",
            "/chat"
        ]
    }

if __name__ == "__main__":
    print("🚀 AigptMCP Server with AI Memory starting...")
    print(f"📁 Memory directory: {MEMORY_DIR}")
    print(f"🧠 AI Memory processing: {'✅ Enabled' if os.getenv('OPENAI_API_KEY') or os.getenv('ANTHROPIC_API_KEY') else '❌ Disabled (no API keys)'}")
    uvicorn.run(app, host="127.0.0.1", port=5000)
12
mcp/setup.py
12
mcp/setup.py
@ -1,12 +0,0 @@
# setup.py
from setuptools import setup

setup(
    name='aigpt-mcp',
    py_modules=['cli'],
    entry_points={
        'console_scripts': [
            'mcp = cli:main',
        ],
    },
)
130
readme.md
Normal file
130
readme.md
Normal file
@ -0,0 +1,130 @@
# Memory-Enhanced MCP Server Usage Guide

## Overview

This MCP server stores ChatGPT conversation history as memories and makes them available when chatting with the AI.

## Setup

### 1. Install the dependencies

```bash
pip install -r requirements.txt
```

### 2. Start the server

```bash
python mcp/server.py
```

The server listens on http://localhost:5000.

## Usage

### 1. Import ChatGPT conversation history

Export your conversations from ChatGPT and save them as a JSON file.

```bash
# Import a single file
python mcp/memory_client.py import your_chatgpt_export.json

# Example output
✅ Imported 5/5 conversations
```

### 2. Search memories

```bash
# Search memories by keyword
python mcp/memory_client.py search "プログラミング"

# Example output
🔍 Searching for: プログラミング
📚 Found 3 memories:
  • Pythonの基礎学習
    Summary: Conversation with 10 user messages and 8 assistant responses...
    Messages: 18
```

### 3. List all memories

```bash
python mcp/memory_client.py list

# Example output
📋 Listing all memories...
📚 Total memories: 15
  • day
    Source: chatgpt
    Messages: 2
    Imported: 2025-01-21T10:30:45.123456
```

### 4. Show memory details

```bash
python mcp/memory_client.py detail "/path/to/memory/file.json"

# Example output
📄 Getting details for: /path/to/memory/file.json
Title: day
Source: chatgpt
Summary: Conversation with 1 user messages and 1 assistant responses...
Messages: 2

Recent messages:
  user: こんにちは...
  assistant: こんにちは〜!✨...
```

### 5. Chat with memory

```bash
python mcp/memory_client.py chat "Pythonについて教えて"

# Example output
💬 Chatting with memory: Pythonについて教えて
🤖 Response: Enhanced response with memory context...
📚 Memories used: 2
```

## API endpoints

### POST /memory/import/chatgpt

Import ChatGPT conversation history.

```json
{
  "conversation_data": { ... }
}
```

### POST /memory/search

Search memories.

```json
{
  "query": "検索キーワード",
  "limit": 10
}
```

### GET /memory/list

List all memories.

### GET /memory/detail?filepath=/path/to/file

Get the details of a memory.

### POST /chat

Chat using memories.

```json
{
  "message": "メッセージ",
  "model": "model_name"
}
```
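The same operations are available over plain HTTP. Below is a minimal `requests` sketch, assuming the server above is running locally; the endpoint paths and response fields follow the handlers defined in `mcp/server.py`.

```python
# Minimal API client sketch: search memories, then chat with memory context.
# Assumes mcp/server.py is running on 127.0.0.1:5000.
import requests

# Search memories (POST /memory/search)
res = requests.post(
    "http://127.0.0.1:5000/memory/search",
    json={"query": "プログラミング", "limit": 5},
)
res.raise_for_status()
body = res.json()
print(f"hits: {body['count']}")
for hit in body["results"]:
    # each hit carries the basic summary, and the AI summary when available
    print(f"- {hit['title']}: {hit.get('ai_summary') or hit['basic_summary']}")

# Chat with memory context (POST /chat)
res = requests.post(
    "http://127.0.0.1:5000/chat",
    json={"message": "Pythonについて教えて"},
)
res.raise_for_status()
print(res.json()["response"])
```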
## Where memories are stored

Memories are saved under the following directory:

`~/.config/aigpt/memory/chatgpt/`

Each conversation is stored as its own JSON file containing:

- Title
- Import time
- Message history
- Auto-generated summary
- Metadata
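To see what one stored record looks like, the short sketch below reads the newest file and prints its main keys. The field names match what `save_chatgpt_memory()` in `mcp/server.py` writes; it assumes at least one conversation has already been imported.

```python
# Inspect the newest saved memory file (filenames start with a timestamp,
# so the lexicographically last file is the most recent).
import json
from pathlib import Path

memory_dir = Path.home() / ".config" / "aigpt" / "memory" / "chatgpt"
for path in sorted(memory_dir.glob("*.json"))[-1:]:
    data = json.loads(path.read_text(encoding="utf-8"))
    print(path.name)
    print("  title:   ", data["title"])
    print("  imported:", data["import_time"])
    print("  messages:", len(data["messages"]))
    print("  summary: ", data["basic_summary"])
    if data.get("ai_analysis"):
        print("  topics:  ", ", ".join(data["ai_analysis"].get("main_topics", [])))
```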
## Exporting conversations from ChatGPT

1. Open the ChatGPT settings screen
2. Select "Data controls" → "Export data"
3. Download the export file
4. Use the conversations.json file

## Possible extensions

- **Advanced search**: vector or semantic search (see the sketch after this list)
- **Summary generation**: improved AI-generated summaries
- **Memory classification**: categories and tags
- **Memory consolidation**: merging knowledge across multiple conversations
- **Privacy protection**: automatic detection and masking of sensitive information
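For the vector-search item, one possible shape is to embed each memory summary and rank by cosine similarity against the embedded query. This is only a sketch of the idea, not code that exists in this repository: the model choice is an assumption, and a real implementation would cache embeddings on import rather than recomputing them for every query.

```python
# Semantic-search sketch using the OpenAI embeddings API.
# Assumptions: OPENAI_API_KEY is set; "text-embedding-3-small" is an
# illustrative model choice; nothing here is part of this repo yet.
import json
import math
import os
from pathlib import Path

import requests

EMBED_URL = "https://api.openai.com/v1/embeddings"
MODEL = "text-embedding-3-small"

def embed(text: str) -> list[float]:
    res = requests.post(
        EMBED_URL,
        headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
        json={"model": MODEL, "input": text},
    )
    res.raise_for_status()
    return res.json()["data"][0]["embedding"]

def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b)))

def semantic_search(query: str, limit: int = 5):
    memory_dir = Path.home() / ".config" / "aigpt" / "memory" / "chatgpt"
    q = embed(query)
    scored = []
    for path in memory_dir.glob("*.json"):
        data = json.loads(path.read_text(encoding="utf-8"))
        text = f"{data.get('title', '')} {data.get('basic_summary', '')}"
        scored.append((cosine(q, embed(text)), data.get("title"), str(path)))
    return sorted(scored, reverse=True)[:limit]
```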
## Troubleshooting

### The server does not start

- Check that port 5000 is not already in use
- Check that the dependencies installed correctly

### Import fails

- Check that the JSON file has the expected format
- Check that the file path is correct
- Check the file permissions

### Search returns no results

- Check that the import completed successfully
- Try a different search keyword
37
src/agent.rs
37
src/agent.rs
@ -1,37 +0,0 @@
use chrono::{NaiveDateTime};

#[allow(dead_code)]
#[derive(Debug)]
pub struct AIState {
    pub relation_score: f32,
    pub previous_score: f32,
    pub decay_rate: f32,
    pub sensitivity: f32,
    pub message_threshold: f32,
    pub last_message_time: NaiveDateTime,
}

#[allow(dead_code)]
impl AIState {
    pub fn update(&mut self, now: NaiveDateTime) {
        let days_passed = (now - self.last_message_time).num_days() as f32;
        let decay = self.decay_rate * days_passed;
        self.previous_score = self.relation_score;
        self.relation_score -= decay;
        self.relation_score = self.relation_score.clamp(0.0, 100.0);
    }

    pub fn should_talk(&self) -> bool {
        let delta = self.previous_score - self.relation_score;
        delta > self.message_threshold && self.sensitivity > 0.5
    }

    pub fn generate_message(&self) -> String {
        match self.relation_score as i32 {
            80..=100 => "ふふっ、最近どうしてる?会いたくなっちゃった!".to_string(),
            60..=79 => "ちょっとだけ、さみしかったんだよ?".to_string(),
            40..=59 => "えっと……話せる時間ある?".to_string(),
            _ => "ううん、もしかして私のこと、忘れちゃったのかな……".to_string(),
        }
    }
}
140
src/chat.rs
140
src/chat.rs
@ -1,140 +0,0 @@
// src/chat.rs
use std::fs;
use std::process::Command;
use serde::Deserialize;
use seahorse::Context;
use crate::config::ConfigPaths;
use crate::metrics::{load_user_data, save_user_data, update_metrics_decay};
//use std::process::Stdio;
//use std::io::Write;
//use std::time::Duration;
//use std::net::TcpStream;

#[derive(Debug, Clone, PartialEq)]
pub enum Provider {
    OpenAI,
    Ollama,
    MCP,
}

impl Provider {
    pub fn from_str(s: &str) -> Option<Self> {
        match s.to_lowercase().as_str() {
            "openai" => Some(Provider::OpenAI),
            "ollama" => Some(Provider::Ollama),
            "mcp" => Some(Provider::MCP),
            _ => None,
        }
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            Provider::OpenAI => "openai",
            Provider::Ollama => "ollama",
            Provider::MCP => "mcp",
        }
    }
}

#[derive(Deserialize)]
struct OpenAIKey {
    token: String,
}

fn load_openai_api_key() -> Option<String> {
    let config = ConfigPaths::new();
    let path = config.base_dir.join("openai.json");
    let data = fs::read_to_string(path).ok()?;
    let parsed: OpenAIKey = serde_json::from_str(&data).ok()?;
    Some(parsed.token)
}

pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
    let config = ConfigPaths::new();
    let base_dir = config.base_dir.join("mcp");
    let user_path = config.base_dir.join("user.json");

    let mut user = load_user_data(&user_path);
    user.metrics = update_metrics_decay();

    // Command-line options
    let ollama_host = c.string_flag("host").ok();
    let ollama_model = c.string_flag("model").ok();
    let provider_str = c.string_flag("provider").unwrap_or_else(|_| "ollama".to_string());
    let provider = Provider::from_str(&provider_str).unwrap_or(Provider::Ollama);
    let api_key = c.string_flag("api-key").ok().or_else(load_openai_api_key);

    println!("🔍 使用プロバイダー: {}", provider.as_str());

    match provider {
        Provider::MCP => {
            let client = reqwest::blocking::Client::new();
            let url = std::env::var("MCP_URL").unwrap_or("http://127.0.0.1:5000/chat".to_string());
            let res = client.post(url)
                .json(&serde_json::json!({"message": question}))
                .send();

            match res {
                Ok(resp) => {
                    if resp.status().is_success() {
                        let json: serde_json::Value = resp.json().ok()?;
                        let text = json.get("response")?.as_str()?.to_string();
                        user.metrics.intimacy += 0.01;
                        user.metrics.last_updated = chrono::Utc::now();
                        save_user_data(&user_path, &user);
                        Some(text)
                    } else {
                        eprintln!("❌ MCPエラー: HTTP {}", resp.status());
                        None
                    }
                }
                Err(e) => {
                    eprintln!("❌ MCP接続失敗: {}", e);
                    None
                }
            }
        }
        _ => {
            // Path to the Python entry point
            let python_path = if cfg!(target_os = "windows") {
                base_dir.join(".venv/Scripts/mcp.exe")
            } else {
                base_dir.join(".venv/bin/mcp")
            };

            let mut command = Command::new(python_path);
            command.arg("ask").arg(question);

            if let Some(host) = ollama_host {
                command.env("OLLAMA_HOST", host);
            }
            if let Some(model) = ollama_model {
                command.env("OLLAMA_MODEL", model.clone());
                command.env("OPENAI_MODEL", model);
            }
            command.env("PROVIDER", provider.as_str());

            if let Some(key) = api_key {
                command.env("OPENAI_API_KEY", key);
            }

            let output = command.output().expect("❌ MCPチャットスクリプトの実行に失敗しました");

            if output.status.success() {
                let response = String::from_utf8_lossy(&output.stdout).to_string();
                user.metrics.intimacy += 0.01;
                user.metrics.last_updated = chrono::Utc::now();
                save_user_data(&user_path, &user);

                Some(response)
            } else {
                eprintln!(
                    "❌ 実行エラー: {}\n{}",
                    String::from_utf8_lossy(&output.stderr),
                    String::from_utf8_lossy(&output.stdout),
                );
                None
            }
        }
    }
}
148
src/cli.rs
148
src/cli.rs
@ -1,100 +1,64 @@
// src/cli.rs

Removed (old seahorse-based CLI):

use std::path::{Path};
use chrono::{Duration, Local};
use rusqlite::Connection;
use seahorse::{App, Command, Context};

use crate::utils::{load_config, save_config};
use crate::config::ConfigPaths;
use crate::agent::AIState;
use crate::commands::db::{save_cmd, export_cmd};
use crate::commands::scheduler::{scheduler_cmd};
use crate::commands::mcp::mcp_cmd;

pub fn cli_app() -> App {
    let set_cmd = Command::new("set")
        .usage("set [trust|intimacy|curiosity] [value]")
        .action(|c: &Context| {
            if c.args.len() != 2 {
                eprintln!("Usage: set [trust|intimacy|curiosity] [value]");
                std::process::exit(1);
            }

            let field = &c.args[0];
            let value: f32 = c.args[1].parse().unwrap_or_else(|_| {
                eprintln!("数値で入力してください");
                std::process::exit(1);
            });

            // Resolve the config file paths via ConfigPaths
            let config_paths = ConfigPaths::new();
            let json_path = config_paths.data_file("json");
            // Copy example.json if user.json does not exist yet
            config_paths.ensure_file_exists("json", Path::new("example.json"));
            let db_path = config_paths.data_file("db");
            let mut ai = load_config(json_path.to_str().unwrap());

            match field.as_str() {
                "trust" => ai.relationship.trust = value,
                "intimacy" => ai.relationship.intimacy = value,
                "curiosity" => ai.relationship.curiosity = value,
                _ => {
                    eprintln!("trust / intimacy / curiosity のいずれかを指定してください");
                    std::process::exit(1);
                }
            }

            save_config(json_path.to_str().unwrap(), &ai);

            let conn = Connection::open(db_path.to_str().unwrap()).expect("DB接続失敗");
            ai.save_to_db(&conn).expect("DB保存失敗");

            println!("✅ {field} を {value} に更新しました");
        });

    let show_cmd = Command::new("show")
        .usage("show")
        .action(|_c: &Context| {
            // Resolve the config file paths via ConfigPaths
            let config_paths = ConfigPaths::new();
            let ai = load_config(config_paths.data_file("json").to_str().unwrap());
            println!("🧠 現在のAI状態:\n{:#?}", ai);
        });

    let talk_cmd = Command::new("talk")
        .usage("talk")
        .action(|_c: &Context| {
            let config_paths = ConfigPaths::new();
            let ai = load_config(config_paths.data_file("json").to_str().unwrap());

            let now = Local::now().naive_local();
            let mut state = AIState {
                relation_score: 80.0,
                previous_score: 80.0,
                decay_rate: ai.messaging.decay_rate,
                sensitivity: ai.personality.strength,
                message_threshold: 5.0,
                last_message_time: now - Duration::days(4),
            };

            state.update(now);

            if state.should_talk() {
                println!("💬 AI発話: {}", state.generate_message());
            } else {
                println!("🤫 今日は静かにしているみたい...");
            }
        });

    App::new("aigpt")
        .version("0.1.0")
        .description("AGE system CLI controller")
        .author("syui")
        .command(set_cmd)
        .command(show_cmd)
        .command(talk_cmd)
        .command(save_cmd())
        .command(export_cmd())
        .command(scheduler_cmd())
        .command(mcp_cmd())
}

Added (new clap-based CLI):

use clap::{Parser, Subcommand};

#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "AI GPT CLI with MCP Server and Memory")]
pub struct Args {
    #[command(subcommand)]
    pub command: Commands,
}

#[derive(Subcommand)]
pub enum Commands {
    /// MCP Server management
    Server {
        #[command(subcommand)]
        command: ServerCommands,
    },
    /// Chat with AI
    Chat {
        /// Message to send
        message: String,
        /// Use memory context
        #[arg(long)]
        with_memory: bool,
    },
    /// Memory management
    Memory {
        #[command(subcommand)]
        command: MemoryCommands,
    },
}

#[derive(Subcommand)]
pub enum ServerCommands {
    /// Setup Python MCP server environment
    Setup,
    /// Run the MCP server
    Run,
}

#[derive(Subcommand)]
pub enum MemoryCommands {
    /// Import ChatGPT conversation export file
    Import {
        /// Path to ChatGPT export JSON file
        file: String,
    },
    /// Search memories
    Search {
        /// Search query
        query: String,
        /// Maximum number of results
        #[arg(short, long, default_value = "10")]
        limit: usize,
    },
    /// List all memories
    List,
    /// Show memory details
    Detail {
        /// Path to memory file
        filepath: String,
    },
}
@ -1,44 +0,0 @@
// src/commands/db.rs
use seahorse::{Command, Context};
use crate::utils::{load_config};
use crate::model::AiSystem;
use crate::config::ConfigPaths;

use rusqlite::Connection;
use std::fs;

pub fn save_cmd() -> Command {
    Command::new("save")
        .usage("save")
        .action(|_c: &Context| {
            let paths = ConfigPaths::new();

            let json_path = paths.data_file("json");
            let db_path = paths.data_file("db");

            let ai = load_config(json_path.to_str().unwrap());
            let conn = Connection::open(db_path).expect("DB接続失敗");

            ai.save_to_db(&conn).expect("DB保存失敗");
            println!("💾 DBに保存完了");
        })
}

pub fn export_cmd() -> Command {
    Command::new("export")
        .usage("export [output.json]")
        .action(|c: &Context| {
            let output_path = c.args.get(0).map(|s| s.as_str()).unwrap_or("output.json");

            let paths = ConfigPaths::new();
            let db_path = paths.data_file("db");

            let conn = Connection::open(db_path).expect("DB接続失敗");
            let ai = AiSystem::load_from_db(&conn).expect("DB読み込み失敗");

            let json = serde_json::to_string_pretty(&ai).expect("JSON変換失敗");
            fs::write(output_path, json).expect("ファイル書き込み失敗");

            println!("📤 JSONにエクスポート完了: {output_path}");
        })
}
@ -1,17 +0,0 @@
// src/commands/git_repo.rs
use std::fs;

// Read the contents of every file at the top level of the Git repository
// (note: fs::read_dir does not recurse into subdirectories)
pub fn read_all_git_files(repo_path: &str) -> String {
    let mut content = String::new();
    for entry in fs::read_dir(repo_path).expect("ディレクトリ読み込み失敗") {
        let entry = entry.expect("エントリ読み込み失敗");
        let path = entry.path();
        if path.is_file() {
            if let Ok(file_content) = fs::read_to_string(&path) {
                content.push_str(&format!("\n\n# File: {}\n{}", path.display(), file_content));
            }
        }
    }
    content
}
@ -1,277 +0,0 @@
|
||||
// src/commands/mcp.rs
|
||||
|
||||
use std::fs;
|
||||
use std::path::{PathBuf};
|
||||
use std::process::Command as OtherCommand;
|
||||
use serde_json::json;
|
||||
use seahorse::{Command, Context, Flag, FlagType};
|
||||
use crate::chat::ask_chat;
|
||||
use crate::git::{git_init, git_status};
|
||||
use crate::config::ConfigPaths;
|
||||
use crate::commands::git_repo::read_all_git_files;
|
||||
use crate::metrics::{load_user_data, save_user_data};
|
||||
use crate::memory::{log_message};
|
||||
|
||||
pub fn mcp_setup() {
|
||||
let config = ConfigPaths::new();
|
||||
let dest_dir = config.base_dir.join("mcp");
|
||||
let repo_url = "https://github.com/microsoft/MCP.git";
|
||||
println!("📁 MCP ディレクトリ: {}", dest_dir.display());
|
||||
|
||||
// 1. git clone(もしまだなければ)
|
||||
if !dest_dir.exists() {
|
||||
let status = OtherCommand::new("git")
|
||||
.args(&["clone", repo_url, dest_dir.to_str().unwrap()])
|
||||
.status()
|
||||
.expect("git clone に失敗しました");
|
||||
assert!(status.success(), "git clone 実行時にエラーが発生しました");
|
||||
}
|
||||
|
||||
let asset_base = PathBuf::from("mcp");
|
||||
let files_to_copy = vec![
|
||||
"cli.py",
|
||||
"setup.py",
|
||||
"scripts/ask.py",
|
||||
"scripts/server.py",
|
||||
"scripts/config.py",
|
||||
"scripts/summarize.py",
|
||||
"scripts/context_loader.py",
|
||||
"scripts/prompt_template.py",
|
||||
"scripts/memory_store.py",
|
||||
];
|
||||
|
||||
for rel_path in files_to_copy {
|
||||
let src = asset_base.join(rel_path);
|
||||
let dst = dest_dir.join(rel_path);
|
||||
if let Some(parent) = dst.parent() {
|
||||
let _ = fs::create_dir_all(parent);
|
||||
}
|
||||
if let Err(e) = fs::copy(&src, &dst) {
|
||||
eprintln!("❌ コピー失敗: {} → {}: {}", src.display(), dst.display(), e);
|
||||
} else {
|
||||
println!("✅ コピー: {} → {}", src.display(), dst.display());
|
||||
}
|
||||
}
|
||||
|
||||
// venvの作成
|
||||
let venv_path = dest_dir.join(".venv");
|
||||
if !venv_path.exists() {
|
||||
println!("🐍 仮想環境を作成しています...");
|
||||
let output = OtherCommand::new("python3")
|
||||
.args(&["-m", "venv", ".venv"])
|
||||
.current_dir(&dest_dir)
|
||||
.output()
|
||||
.expect("venvの作成に失敗しました");
|
||||
|
||||
if !output.status.success() {
|
||||
eprintln!("❌ venv作成エラー: {}", String::from_utf8_lossy(&output.stderr));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// `pip install -e .` を仮想環境で実行
|
||||
let pip_path = if cfg!(target_os = "windows") {
|
||||
dest_dir.join(".venv/Scripts/pip.exe").to_string_lossy().to_string()
|
||||
} else {
|
||||
dest_dir.join(".venv/bin/pip").to_string_lossy().to_string()
|
||||
};
|
||||
|
||||
println!("📦 必要なパッケージをインストールしています...");
|
||||
let output = OtherCommand::new(&pip_path)
|
||||
.arg("install")
|
||||
.arg("openai")
|
||||
.arg("requests")
|
||||
.arg("fastmcp")
|
||||
.arg("uvicorn")
|
||||
.arg("fastapi")
|
||||
.arg("fastapi_mcp")
|
||||
.arg("mcp")
|
||||
.current_dir(&dest_dir)
|
||||
.output()
|
||||
.expect("pip install に失敗しました");
|
||||
|
||||
if !output.status.success() {
|
||||
eprintln!(
|
||||
"❌ pip エラー: {}\n{}",
|
||||
String::from_utf8_lossy(&output.stderr),
|
||||
String::from_utf8_lossy(&output.stdout)
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
println!("📦 pip install -e . を実行します...");
|
||||
let output = OtherCommand::new(&pip_path)
|
||||
.arg("install")
|
||||
.arg("-e")
|
||||
.arg(".")
|
||||
.current_dir(&dest_dir)
|
||||
.output()
|
||||
.expect("pip install に失敗しました");
|
||||
|
||||
if output.status.success() {
|
||||
println!("🎉 MCP セットアップが完了しました!");
|
||||
} else {
|
||||
eprintln!(
|
||||
"❌ pip エラー: {}\n{}",
|
||||
String::from_utf8_lossy(&output.stderr),
|
||||
String::from_utf8_lossy(&output.stdout)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_api_key_cmd() -> Command {
|
||||
Command::new("set-api")
|
||||
.description("OpenAI APIキーを設定")
|
||||
.usage("mcp set-api --api <API_KEY>")
|
||||
.flag(Flag::new("api", FlagType::String).description("OpenAI APIキー").alias("a"))
|
||||
.action(|c: &Context| {
|
||||
if let Ok(api_key) = c.string_flag("api") {
|
||||
let config = ConfigPaths::new();
|
||||
let path = config.base_dir.join("openai.json");
|
||||
let json_data = json!({ "token": api_key });
|
||||
|
||||
if let Err(e) = fs::write(&path, serde_json::to_string_pretty(&json_data).unwrap()) {
|
||||
eprintln!("❌ ファイル書き込み失敗: {}", e);
|
||||
} else {
|
||||
println!("✅ APIキーを保存しました: {}", path.display());
|
||||
}
|
||||
} else {
|
||||
eprintln!("❗ APIキーを --api で指定してください");
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn chat_cmd() -> Command {
|
||||
Command::new("chat")
|
||||
.description("チャットで質問を送る")
|
||||
.usage("mcp chat '質問内容' --host <OLLAMA_HOST> --model <MODEL> [--provider <ollama|openai>] [--api-key <KEY>] [--repo <REPO_URL>]")
|
||||
.flag(
|
||||
Flag::new("host", FlagType::String)
|
||||
.description("OLLAMAホストのURL")
|
||||
.alias("H"),
|
||||
)
|
||||
.flag(
|
||||
Flag::new("model", FlagType::String)
|
||||
.description("モデル名 (OLLAMA_MODEL / OPENAI_MODEL)")
|
||||
.alias("m"),
|
||||
)
|
||||
.flag(
|
||||
Flag::new("provider", FlagType::String)
|
||||
.description("使用するプロバイダ (ollama / openai)")
|
||||
.alias("p"),
|
||||
)
|
||||
.flag(
|
||||
Flag::new("api-key", FlagType::String)
|
||||
.description("OpenAI APIキー")
|
||||
.alias("k"),
|
||||
)
|
||||
.flag(
|
||||
Flag::new("repo", FlagType::String)
|
||||
.description("Gitリポジトリのパスを指定 (すべてのコードを読み込む)")
|
||||
.alias("r"),
|
||||
)
|
||||
.action(|c: &Context| {
|
||||
let config = ConfigPaths::new();
|
||||
let user_path = config.data_file("json");
|
||||
let mut user = load_user_data(&user_path);
|
||||
// repoがある場合は、コードベース読み込みモード
|
||||
if let Ok(repo_url) = c.string_flag("repo") {
|
||||
let repo_base = config.base_dir.join("repos");
|
||||
let repo_dir = repo_base.join(sanitize_repo_name(&repo_url));
|
||||
|
||||
if !repo_dir.exists() {
|
||||
println!("📥 Gitリポジトリをクローン中: {}", repo_url);
|
||||
let status = OtherCommand::new("git")
|
||||
.args(&["clone", &repo_url, repo_dir.to_str().unwrap()])
|
||||
.status()
|
||||
.expect("❌ Gitのクローンに失敗しました");
|
||||
assert!(status.success(), "Git clone エラー");
|
||||
} else {
|
||||
println!("✔ リポジトリはすでに存在します: {}", repo_dir.display());
|
||||
}
|
||||
|
||||
let files = read_all_git_files(repo_dir.to_str().unwrap());
|
||||
let prompt = format!(
|
||||
"以下のコードベースを読み込んで、改善案や次のステップを提案してください:\n{}",
|
||||
files
|
||||
);
|
||||
|
||||
if let Some(response) = ask_chat(c, &prompt) {
|
||||
println!("💬 提案:\n{}", response);
|
||||
} else {
|
||||
eprintln!("❗ 提案が取得できませんでした");
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// 通常のチャット処理(repoが指定されていない場合)
|
||||
match c.args.get(0) {
|
||||
Some(question) => {
|
||||
log_message(&config.base_dir, "user", question);
|
||||
let response = ask_chat(c, question);
|
||||
|
||||
if let Some(ref text) = response {
|
||||
println!("💬 応答:\n{}", text);
|
||||
// 返答内容に基づいて増減(返答の感情解析)
|
||||
if text.contains("thank") || text.contains("great") {
|
||||
user.metrics.trust += 0.05;
|
||||
} else if text.contains("hate") || text.contains("bad") {
|
||||
user.metrics.trust -= 0.05;
|
||||
}
|
||||
log_message(&config.base_dir, "ai", &text);
|
||||
save_user_data(&user_path, &user);
|
||||
} else {
|
||||
eprintln!("❗ 応答が取得できませんでした");
|
||||
}
|
||||
}
|
||||
None => {
|
||||
eprintln!("❗ 質問が必要です: mcp chat 'こんにちは'");
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
```rust
fn init_cmd() -> Command {
    Command::new("init")
        .description("Git 初期化")
        .usage("mcp init")
        .action(|_| {
            git_init();
        })
}

fn status_cmd() -> Command {
    Command::new("status")
        .description("Git ステータス表示")
        .usage("mcp status")
        .action(|_| {
            git_status();
        })
}

fn setup_cmd() -> Command {
    Command::new("setup")
        .description("MCP の初期セットアップ")
        .usage("mcp setup")
        .action(|_| {
            mcp_setup();
        })
}

pub fn mcp_cmd() -> Command {
    Command::new("mcp")
        .description("MCP操作コマンド")
        .usage("mcp <subcommand>")
        .alias("m")
        .command(chat_cmd())
        .command(init_cmd())
        .command(status_cmd())
        .command(setup_cmd())
        .command(set_api_key_cmd())
}

// Convert a repo URL into a name that is safe to use as a directory name
fn sanitize_repo_name(repo_url: &str) -> String {
    repo_url.replace("://", "_").replace("/", "_").replace("@", "_")
}
```
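For reference, a hedged example (not part of the diff) of what `sanitize_repo_name` produces for the scp-style URL used in the README. Note that `:` is not replaced, which is fine on Unix filesystems but would be rejected as a file name on Windows:

```rust
// "://" is absent in scp-style URLs, so only "/" and "@" become "_".
assert_eq!(
    sanitize_repo_name("git@git.syui.ai:ai/gpt"),
    "git_git.syui.ai:ai_gpt"
);
```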
Removed module declarations (@@ -1,4 +0,0 @@):

```rust
pub mod db;
pub mod scheduler;
pub mod mcp;
pub mod git_repo;
```
src/commands/scheduler.rs (deleted, 127 lines):

```rust
// src/commands/scheduler.rs
use seahorse::{Command, Context};
use std::thread;
use std::time::Duration;
use chrono::{Local, Utc, Timelike};
use crate::metrics::{load_user_data, save_user_data};
use crate::config::ConfigPaths;
use crate::chat::ask_chat;
use rand::prelude::*;
use rand::rng;

fn send_scheduled_message() {
    let config = ConfigPaths::new();
    let user_path = config.data_file("json");
    let mut user = load_user_data(&user_path);

    if !user.metrics.can_send {
        println!("🚫 送信条件を満たしていないため、スケジュール送信スキップ");
        return;
    }

    // Date comparison (limit: one send per day)
    let today = Local::now().format("%Y-%m-%d").to_string();
    if let Some(last_date) = &user.messaging.last_sent_date {
        if last_date != &today {
            user.messaging.sent_today = false;
        }
    } else {
        user.messaging.sent_today = false;
    }

    if user.messaging.sent_today {
        println!("🔁 本日はすでに送信済みです: {}", today);
        return;
    }

    if let Some(schedule_str) = &user.messaging.schedule_time {
        let now = Local::now();
        let target: Vec<&str> = schedule_str.split(':').collect();

        if target.len() != 2 {
            println!("⚠️ schedule_time形式が無効です: {}", schedule_str);
            return;
        }

        let (sh, sm) = (target[0].parse::<u32>(), target[1].parse::<u32>());
        if let (Ok(sh), Ok(sm)) = (sh, sm) {
            if now.hour() == sh && now.minute() == sm {
                if let Some(msg) = user.messaging.templates.choose(&mut rng()) {
                    println!("💬 自動送信メッセージ: {}", msg);
                    let dummy_context = Context::new(vec![], None, "".to_string());
                    ask_chat(&dummy_context, msg);
                    user.metrics.intimacy += 0.03;

                    // Mark today's message as sent
                    user.messaging.sent_today = true;
                    user.messaging.last_sent_date = Some(today);

                    save_user_data(&user_path, &user);
                }
            }
        }
    }
}

pub fn scheduler_cmd() -> Command {
    Command::new("scheduler")
        .usage("scheduler [interval_sec]")
        .alias("s")
        .description("定期的に送信条件をチェックし、自発的なメッセージ送信を試みる")
        .action(|c: &Context| {
            let interval = c.args.get(0)
                .and_then(|s| s.parse::<u64>().ok())
                .unwrap_or(3600); // default: 1 hour (easy to test with)

            println!("⏳ スケジューラー開始({}秒ごと)...", interval);

            loop {
                let config = ConfigPaths::new();
                let user_path = config.data_file("json");
                let mut user = load_user_data(&user_path);

                let now = Utc::now();
                let elapsed = now.signed_duration_since(user.metrics.last_updated);
                let hours = elapsed.num_minutes() as f32 / 60.0;

                let speed_factor = if hours > 48.0 {
                    2.0
                } else if hours > 24.0 {
                    1.5
                } else {
                    1.0
                };

                user.metrics.trust = (user.metrics.trust - 0.01 * speed_factor).clamp(0.0, 1.0);
                user.metrics.intimacy = (user.metrics.intimacy - 0.01 * speed_factor).clamp(0.0, 1.0);
                user.metrics.energy = (user.metrics.energy - 0.01 * speed_factor).clamp(0.0, 1.0);

                user.metrics.can_send =
                    user.metrics.trust >= 0.5 &&
                    user.metrics.intimacy >= 0.5 &&
                    user.metrics.energy >= 0.5;

                user.metrics.last_updated = now;

                if user.metrics.can_send {
                    println!("💡 AIメッセージ送信条件を満たしています(信頼:{:.2}, 親密:{:.2}, エネルギー:{:.2})",
                        user.metrics.trust,
                        user.metrics.intimacy,
                        user.metrics.energy
                    );
                    send_scheduled_message();
                } else {
                    println!("🤫 条件未達成のため送信スキップ: trust={:.2}, intimacy={:.2}, energy={:.2}",
                        user.metrics.trust,
                        user.metrics.intimacy,
                        user.metrics.energy
                    );
                }

                save_user_data(&user_path, &user);
                thread::sleep(Duration::from_secs(interval));
            }
        })
}
```
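To make the decay arithmetic concrete, here is the per-tick update the loop applies, isolated as a hedged sketch (the helper function itself is not in the diff), with one worked value:

```rust
// Each scheduler tick subtracts 0.01 per metric, scaled up if the user
// has been idle for more than 24 or 48 hours.
fn decayed(value: f32, hours_since_update: f32) -> f32 {
    let speed_factor = if hours_since_update > 48.0 {
        2.0
    } else if hours_since_update > 24.0 {
        1.5
    } else {
        1.0
    };
    (value - 0.01 * speed_factor).clamp(0.0, 1.0)
}

// e.g. trust 0.60 after 30 idle hours: 0.60 - 0.01 * 1.5 = 0.585
assert!((decayed(0.60, 30.0) - 0.585).abs() < 1e-6);
```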
src/config.rs (modified; both hunks are inside `impl ConfigPaths`, with the lost +/- markers reconstructed from the surrounding code):

```diff
@@ -22,6 +22,7 @@ impl ConfigPaths {
         }
     }

+    #[allow(dead_code)]
     pub fn data_file(&self, file_name: &str) -> PathBuf {
         let file_path = match file_name {
             "db" => self.base_dir.join("user.db"),
@@ -29,18 +30,30 @@ impl ConfigPaths {
             "json" => self.base_dir.join("user.json"),
             _ => self.base_dir.join(format!(".{}", file_name)),
         };

         file_path
     }
-    /// 設定ファイルがなければ `example.json` をコピーする
-    pub fn ensure_file_exists(&self, file_name: &str, template_path: &Path) {
-        let target = self.data_file(file_name);
-        if !target.exists() {
-            if let Err(e) = fs::copy(template_path, &target) {
-                eprintln!("⚠️ 設定ファイルの初期化に失敗しました: {}", e);
+
+    pub fn mcp_dir(&self) -> PathBuf {
+        self.base_dir.join("mcp")
+    }
+
+    pub fn venv_path(&self) -> PathBuf {
+        self.mcp_dir().join(".venv")
+    }
+
+    pub fn python_executable(&self) -> PathBuf {
+        if cfg!(windows) {
+            self.venv_path().join("Scripts").join("python.exe")
         } else {
-                println!("📄 {} を {} にコピーしました", template_path.display(), target.display());
-            }
+            self.venv_path().join("bin").join("python")
         }
     }
+
+    pub fn pip_executable(&self) -> PathBuf {
+        if cfg!(windows) {
+            self.venv_path().join("Scripts").join("pip.exe")
+        } else {
+            self.venv_path().join("bin").join("pip")
+        }
+    }
 }
```
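A hedged usage sketch (not in the diff; `ConfigPaths::new()` and `base_dir` are defined earlier in config.rs and not shown here) of how the new helpers compose:

```rust
let config = ConfigPaths::new();
let user_json = config.data_file("json"); // <base_dir>/user.json
let python = config.python_executable();  // <base_dir>/mcp/.venv/bin/python on Unix,
                                          // <base_dir>/mcp/.venv/Scripts/python.exe on Windows
```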
src/git.rs (deleted, 42 lines):

```rust
// src/git.rs
use std::process::Command;

pub fn git_status() {
    run_git_command(&["status"]);
}

pub fn git_init() {
    run_git_command(&["init"]);
}

#[allow(dead_code)]
pub fn git_commit(message: &str) {
    run_git_command(&["add", "."]);
    run_git_command(&["commit", "-m", message]);
}

#[allow(dead_code)]
pub fn git_push() {
    run_git_command(&["push"]);
}

#[allow(dead_code)]
pub fn git_pull() {
    run_git_command(&["pull"]);
}

#[allow(dead_code)]
pub fn git_branch() {
    run_git_command(&["branch"]);
}

fn run_git_command(args: &[&str]) {
    let status = Command::new("git")
        .args(args)
        .status()
        .expect("git コマンドの実行に失敗しました");

    if !status.success() {
        eprintln!("⚠️ git コマンドに失敗しました: {:?}", args);
    }
}
```
src/logic.rs (deleted, 13 lines):

```rust
//src/logic.rs
use crate::model::AiSystem;

#[allow(dead_code)]
pub fn should_send(ai: &AiSystem) -> bool {
    let r = &ai.relationship;
    let env = &ai.environment;
    let score = r.trust + r.intimacy + r.curiosity;
    let relationship_ok = score >= r.threshold;
    let luck_ok = env.luck_today > 0.5;

    ai.messaging.enabled && relationship_ok && luck_ok
}
```
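Plugging in the default values that `load_user_data()` in src/metrics.rs (further down in this diff) falls back to makes the gate concrete; a hedged walk-through, not part of the diff:

```rust
let (trust, intimacy, curiosity, threshold) = (0.2_f32, 0.6, 0.5, 1.5);
let luck_today = 0.9_f32;

let relationship_ok = trust + intimacy + curiosity >= threshold; // 1.3 >= 1.5 -> false
let luck_ok = luck_today > 0.5;                                  // true
assert!(!(relationship_ok && luck_ok)); // should_send() stays false until the score grows
```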
src/main.rs (modified; the lost +/- markers are reconstructed from which modules and entry points the rest of this diff adds or deletes):

```diff
@@ -1,21 +1,58 @@
-//src/main.rs
-mod model;
-mod logic;
-mod agent;
+// main.rs
 mod cli;
 mod utils;
 mod commands;
 mod config;
 mod git;
 mod chat;
 mod metrics;
 mod memory;
+mod mcp;

-use cli::cli_app;
-use seahorse::App;
+use cli::{Args, Commands, ServerCommands, MemoryCommands};
+use clap::Parser;

-fn main() {
-    let args: Vec<String> = std::env::args().collect();
-    let app: App = cli_app();
-    app.run(args);
+#[tokio::main]
+async fn main() {
+    let args = Args::parse();
+
+    match args.command {
+        Commands::Server { command } => {
+            match command {
+                ServerCommands::Setup => {
+                    mcp::server::setup();
+                }
+                ServerCommands::Run => {
+                    mcp::server::run().await;
+                }
+            }
+        }
+        Commands::Chat { message, with_memory } => {
+            if with_memory {
+                if let Err(e) = mcp::memory::handle_chat_with_memory(&message).await {
+                    eprintln!("❌ 記憶チャットエラー: {}", e);
+                }
+            } else {
+                mcp::server::chat(&message).await;
+            }
+        }
+        Commands::Memory { command } => {
+            match command {
+                MemoryCommands::Import { file } => {
+                    if let Err(e) = mcp::memory::handle_import(&file).await {
+                        eprintln!("❌ インポートエラー: {}", e);
+                    }
+                }
+                MemoryCommands::Search { query, limit } => {
+                    if let Err(e) = mcp::memory::handle_search(&query, limit).await {
+                        eprintln!("❌ 検索エラー: {}", e);
+                    }
+                }
+                MemoryCommands::List => {
+                    if let Err(e) = mcp::memory::handle_list().await {
+                        eprintln!("❌ 一覧取得エラー: {}", e);
+                    }
+                }
+                MemoryCommands::Detail { filepath } => {
+                    if let Err(e) = mcp::memory::handle_detail(&filepath).await {
+                        eprintln!("❌ 詳細取得エラー: {}", e);
+                    }
+                }
+            }
+        }
+    }
 }
```
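cli.rs itself does not appear in this diff, so the following is a hedged reconstruction: a clap derive structure implied by the `match` arms above. The type and field names are taken from the code; the attributes (`#[arg(long)]`, the `limit` default, the binary name) are assumptions:

```rust
// Hypothetical cli.rs, not in the diff.
use clap::{Parser, Subcommand};

#[derive(Parser)]
#[command(name = "aigpt")] // assumed binary name, from the messages elsewhere
pub struct Args {
    #[command(subcommand)]
    pub command: Commands,
}

#[derive(Subcommand)]
pub enum Commands {
    Server {
        #[command(subcommand)]
        command: ServerCommands,
    },
    Chat {
        message: String,
        #[arg(long)]
        with_memory: bool,
    },
    Memory {
        #[command(subcommand)]
        command: MemoryCommands,
    },
}

#[derive(Subcommand)]
pub enum ServerCommands {
    Setup,
    Run,
}

#[derive(Subcommand)]
pub enum MemoryCommands {
    Import { file: String },
    Search {
        query: String,
        #[arg(long, default_value_t = 5)] // default is an assumption
        limit: usize,
    },
    List,
    Detail { filepath: String },
}
```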
src/mcp/memory.rs (new file, 393 lines):

```rust
// src/mcp/memory.rs
use reqwest;
use serde::{Deserialize, Serialize};
use serde_json::{self, Value};
use std::fs;
use std::path::Path;

#[derive(Debug, Serialize, Deserialize)]
pub struct MemorySearchRequest {
    pub query: String,
    pub limit: usize,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ChatRequest {
    pub message: String,
    pub model: Option<String>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ConversationImportRequest {
    pub conversation_data: Value,
}

#[derive(Debug, Deserialize)]
pub struct ApiResponse {
    pub success: bool,
    pub error: Option<String>,
    #[allow(dead_code)]
    pub message: Option<String>,
    pub filepath: Option<String>,
    pub results: Option<Vec<MemoryResult>>,
    pub memories: Option<Vec<MemoryResult>>,
    #[allow(dead_code)]
    pub count: Option<usize>,
    pub memory: Option<Value>,
    pub response: Option<String>,
    pub memories_used: Option<usize>,
    pub imported_count: Option<usize>,
    pub total_count: Option<usize>,
}

#[derive(Debug, Deserialize)]
pub struct MemoryResult {
    #[allow(dead_code)]
    pub filepath: String,
    pub title: Option<String>,
    pub summary: Option<String>,
    pub source: Option<String>,
    pub import_time: Option<String>,
    pub message_count: Option<usize>,
}

pub struct MemoryClient {
    base_url: String,
    client: reqwest::Client,
}

impl MemoryClient {
    pub fn new(base_url: Option<String>) -> Self {
        let url = base_url.unwrap_or_else(|| "http://127.0.0.1:5000".to_string());
        Self {
            base_url: url,
            client: reqwest::Client::new(),
        }
    }

    pub async fn import_chatgpt_file(&self, filepath: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        // Read the file from disk
        let content = fs::read_to_string(filepath)?;
        let json_data: Value = serde_json::from_str(&content)?;

        // Check whether the JSON root is an array
        match json_data.as_array() {
            Some(conversations) => {
                // Import multiple conversations
                let mut imported_count = 0;
                let total_count = conversations.len();

                for conversation in conversations {
                    match self.import_single_conversation(conversation.clone()).await {
                        Ok(response) => {
                            if response.success {
                                imported_count += 1;
                            }
                        }
                        Err(e) => {
                            eprintln!("❌ インポートエラー: {}", e);
                        }
                    }
                }

                Ok(ApiResponse {
                    success: true,
                    imported_count: Some(imported_count),
                    total_count: Some(total_count),
                    error: None,
                    message: Some(format!("{}個中{}個の会話をインポートしました", total_count, imported_count)),
                    filepath: None,
                    results: None,
                    memories: None,
                    count: None,
                    memory: None,
                    response: None,
                    memories_used: None,
                })
            }
            None => {
                // Import a single conversation
                self.import_single_conversation(json_data).await
            }
        }
    }

    async fn import_single_conversation(&self, conversation_data: Value) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        let request = ConversationImportRequest { conversation_data };

        let response = self.client
            .post(&format!("{}/memory/import/chatgpt", self.base_url))
            .json(&request)
            .send()
            .await?;

        let result: ApiResponse = response.json().await?;
        Ok(result)
    }

    pub async fn search_memories(&self, query: &str, limit: usize) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        let request = MemorySearchRequest {
            query: query.to_string(),
            limit,
        };

        let response = self.client
            .post(&format!("{}/memory/search", self.base_url))
            .json(&request)
            .send()
            .await?;

        let result: ApiResponse = response.json().await?;
        Ok(result)
    }

    pub async fn list_memories(&self) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        let response = self.client
            .get(&format!("{}/memory/list", self.base_url))
            .send()
            .await?;

        let result: ApiResponse = response.json().await?;
        Ok(result)
    }

    pub async fn get_memory_detail(&self, filepath: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        let response = self.client
            .get(&format!("{}/memory/detail", self.base_url))
            .query(&[("filepath", filepath)])
            .send()
            .await?;

        let result: ApiResponse = response.json().await?;
        Ok(result)
    }

    pub async fn chat_with_memory(&self, message: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
        let request = ChatRequest {
            message: message.to_string(),
            model: None,
        };

        let response = self.client
            .post(&format!("{}/chat", self.base_url))
            .json(&request)
            .send()
            .await?;

        let result: ApiResponse = response.json().await?;
        Ok(result)
    }

    pub async fn is_server_running(&self) -> bool {
        match self.client.get(&self.base_url).send().await {
            Ok(response) => response.status().is_success(),
            Err(_) => false,
        }
    }
}

pub async fn handle_import(filepath: &str) -> Result<(), Box<dyn std::error::Error>> {
    if !Path::new(filepath).exists() {
        eprintln!("❌ ファイルが見つかりません: {}", filepath);
        return Ok(());
    }

    let client = MemoryClient::new(None);

    // Check that the server is up
    if !client.is_server_running().await {
        eprintln!("❌ MCP Serverが起動していません。先に 'aigpt server run' を実行してください。");
        return Ok(());
    }

    println!("🔄 ChatGPT会話をインポートしています: {}", filepath);

    match client.import_chatgpt_file(filepath).await {
        Ok(response) => {
            if response.success {
                if let (Some(imported), Some(total)) = (response.imported_count, response.total_count) {
                    println!("✅ {}個中{}個の会話をインポートしました", total, imported);
                } else {
                    println!("✅ 会話をインポートしました");
                    if let Some(path) = response.filepath {
                        println!("📁 保存先: {}", path);
                    }
                }
            } else {
                eprintln!("❌ インポートに失敗: {:?}", response.error);
            }
        }
        Err(e) => {
            eprintln!("❌ インポートエラー: {}", e);
        }
    }

    Ok(())
}

pub async fn handle_search(query: &str, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
    let client = MemoryClient::new(None);

    if !client.is_server_running().await {
        eprintln!("❌ MCP Serverが起動していません。先に 'aigpt server run' を実行してください。");
        return Ok(());
    }

    println!("🔍 記憶を検索しています: {}", query);

    match client.search_memories(query, limit).await {
        Ok(response) => {
            if response.success {
                if let Some(results) = response.results {
                    println!("📚 {}個の記憶が見つかりました:", results.len());
                    for memory in results {
                        println!("  • {}", memory.title.unwrap_or_else(|| "タイトルなし".to_string()));
                        if let Some(summary) = memory.summary {
                            println!("    概要: {}", summary);
                        }
                        if let Some(count) = memory.message_count {
                            println!("    メッセージ数: {}", count);
                        }
                        println!();
                    }
                } else {
                    println!("📚 記憶が見つかりませんでした");
                }
            } else {
                eprintln!("❌ 検索に失敗: {:?}", response.error);
            }
        }
        Err(e) => {
            eprintln!("❌ 検索エラー: {}", e);
        }
    }

    Ok(())
}

pub async fn handle_list() -> Result<(), Box<dyn std::error::Error>> {
    let client = MemoryClient::new(None);

    if !client.is_server_running().await {
        eprintln!("❌ MCP Serverが起動していません。先に 'aigpt server run' を実行してください。");
        return Ok(());
    }

    println!("📋 記憶一覧を取得しています...");

    match client.list_memories().await {
        Ok(response) => {
            if response.success {
                if let Some(memories) = response.memories {
                    println!("📚 総記憶数: {}", memories.len());
                    for memory in memories {
                        println!("  • {}", memory.title.unwrap_or_else(|| "タイトルなし".to_string()));
                        if let Some(source) = memory.source {
                            println!("    ソース: {}", source);
                        }
                        if let Some(count) = memory.message_count {
                            println!("    メッセージ数: {}", count);
                        }
                        if let Some(import_time) = memory.import_time {
                            println!("    インポート時刻: {}", import_time);
                        }
                        println!();
                    }
                } else {
                    println!("📚 記憶がありません");
                }
            } else {
                eprintln!("❌ 一覧取得に失敗: {:?}", response.error);
            }
        }
        Err(e) => {
            eprintln!("❌ 一覧取得エラー: {}", e);
        }
    }

    Ok(())
}

pub async fn handle_detail(filepath: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = MemoryClient::new(None);

    if !client.is_server_running().await {
        eprintln!("❌ MCP Serverが起動していません。先に 'aigpt server run' を実行してください。");
        return Ok(());
    }

    println!("📄 記憶の詳細を取得しています: {}", filepath);

    match client.get_memory_detail(filepath).await {
        Ok(response) => {
            if response.success {
                if let Some(memory) = response.memory {
                    if let Some(title) = memory.get("title").and_then(|v| v.as_str()) {
                        println!("タイトル: {}", title);
                    }
                    if let Some(source) = memory.get("source").and_then(|v| v.as_str()) {
                        println!("ソース: {}", source);
                    }
                    if let Some(summary) = memory.get("summary").and_then(|v| v.as_str()) {
                        println!("概要: {}", summary);
                    }
                    if let Some(messages) = memory.get("messages").and_then(|v| v.as_array()) {
                        println!("メッセージ数: {}", messages.len());
                        println!("\n最近のメッセージ:");
                        for msg in messages.iter().take(5) {
                            if let (Some(role), Some(content)) = (
                                msg.get("role").and_then(|v| v.as_str()),
                                msg.get("content").and_then(|v| v.as_str())
                            ) {
                                let content_preview = if content.len() > 100 {
                                    format!("{}...", &content[..100])
                                } else {
                                    content.to_string()
                                };
                                println!("  {}: {}", role, content_preview);
                            }
                        }
                    }
                }
            } else {
                eprintln!("❌ 詳細取得に失敗: {:?}", response.error);
            }
        }
        Err(e) => {
            eprintln!("❌ 詳細取得エラー: {}", e);
        }
    }

    Ok(())
}

pub async fn handle_chat_with_memory(message: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = MemoryClient::new(None);

    if !client.is_server_running().await {
        eprintln!("❌ MCP Serverが起動していません。先に 'aigpt server run' を実行してください。");
        return Ok(());
    }

    println!("💬 記憶を活用してチャットしています...");

    match client.chat_with_memory(message).await {
        Ok(response) => {
            if response.success {
                if let Some(reply) = response.response {
                    println!("🤖 {}", reply);
                }
                if let Some(memories_used) = response.memories_used {
                    println!("📚 使用した記憶数: {}", memories_used);
                }
            } else {
                eprintln!("❌ チャットに失敗: {:?}", response.error);
            }
        }
        Err(e) => {
            eprintln!("❌ チャットエラー: {}", e);
        }
    }

    Ok(())
}
```
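Two editorial notes on this file. First, the preview truncation in `handle_detail` (`content.len() > 100` followed by `&content[..100]`) slices by bytes, so a boundary that lands inside a multi-byte character, likely with Japanese conversations, will panic; `content.chars().take(100).collect::<String>()` would be the safe form. Second, a hedged sketch (not in the diff) of driving `MemoryClient` directly:

```rust
// Minimal sketch, assuming the memory server from `aigpt server run`
// is listening on the default http://127.0.0.1:5000.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = MemoryClient::new(None);
    if client.is_server_running().await {
        let res = client.search_memories("ゲーム開発", 5).await?;
        println!("success: {}, hits: {}",
                 res.success,
                 res.results.map_or(0, |r| r.len()));
    }
    Ok(())
}
```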
src/mcp/mod.rs (new file, 3 lines):

```rust
// src/mcp/mod.rs
pub mod server;
pub mod memory;
```
src/mcp/server.rs (new file, 147 lines):

```rust
// src/mcp/server.rs
use crate::config::ConfigPaths;
//use std::fs;
use std::process::Command as OtherCommand;
use std::env;
use fs_extra::dir::{copy, CopyOptions};

pub fn setup() {
    println!("🔧 MCP Server環境をセットアップしています...");
    let config = ConfigPaths::new();
    let mcp_dir = config.mcp_dir();

    // Copy files from the project's mcp/ directory
    let current_dir = env::current_dir().expect("現在のディレクトリを取得できません");
    let project_mcp_dir = current_dir.join("mcp");
    if !project_mcp_dir.exists() {
        eprintln!("❌ プロジェクトのmcp/ディレクトリが見つかりません: {}", project_mcp_dir.display());
        return;
    }

    if mcp_dir.exists() {
        fs_extra::dir::remove(&mcp_dir).expect("既存のmcp_dirの削除に失敗しました");
    }

    let mut options = CopyOptions::new();
    options.overwrite = true;   // overwrite existing files
    options.copy_inside = true; // copy the contents rather than nesting the directory

    copy(&project_mcp_dir, &mcp_dir, &options).expect("コピーに失敗しました");

    // Create the virtual environment
    let venv_path = config.venv_path();
    if !venv_path.exists() {
        println!("🐍 仮想環境を作成しています...");
        let output = OtherCommand::new("python3")
            .args(&["-m", "venv", ".venv"])
            .current_dir(&mcp_dir)
            .output()
            .expect("venvの作成に失敗しました");

        if !output.status.success() {
            eprintln!("❌ venv作成エラー: {}", String::from_utf8_lossy(&output.stderr));
            return;
        }
        println!("✅ 仮想環境を作成しました");
    } else {
        println!("✅ 仮想環境は既に存在します");
    }

    // Install dependencies
    println!("📦 依存関係をインストールしています...");
    let pip_path = config.pip_executable();
    let output = OtherCommand::new(&pip_path)
        .args(&["install", "-r", "requirements.txt"])
        .current_dir(&mcp_dir)
        .output()
        .expect("pipコマンドの実行に失敗しました");

    if !output.status.success() {
        eprintln!("❌ pip installエラー: {}", String::from_utf8_lossy(&output.stderr));
        return;
    }

    println!("✅ MCP Server環境のセットアップが完了しました!");
    println!("📍 セットアップ場所: {}", mcp_dir.display());
}

pub async fn run() {
    println!("🚀 MCP Serverを起動しています...");

    let config = ConfigPaths::new();
    let mcp_dir = config.mcp_dir();
    let python_path = config.python_executable();
    let server_py_path = mcp_dir.join("server.py");

    // Verify that setup has been run
    if !server_py_path.exists() {
        eprintln!("❌ server.pyが見つかりません。先に 'aigpt server setup' を実行してください。");
        return;
    }

    if !python_path.exists() {
        eprintln!("❌ Python実行ファイルが見つかりません。先に 'aigpt server setup' を実行してください。");
        return;
    }

    // Launch the server
    println!("🔗 サーバーを起動中... (Ctrl+Cで停止)");
    let mut child = OtherCommand::new(&python_path)
        .arg("server.py")
        .current_dir(&mcp_dir)
        .spawn()
        .expect("MCP Serverの起動に失敗しました");

    // Wait for the server process to exit
    match child.wait() {
        Ok(status) => {
            if status.success() {
                println!("✅ MCP Serverが正常に終了しました");
            } else {
                println!("❌ MCP Serverが異常終了しました: {}", status);
            }
        }
        Err(e) => {
            eprintln!("❌ MCP Serverの実行中にエラーが発生しました: {}", e);
        }
    }
}

pub async fn chat(message: &str) {
    println!("💬 チャットを開始しています...");

    let config = ConfigPaths::new();
    let mcp_dir = config.mcp_dir();
    let python_path = config.python_executable();
    let chat_py_path = mcp_dir.join("chat.py");

    // Verify that setup has been run
    if !chat_py_path.exists() {
        eprintln!("❌ chat.pyが見つかりません。先に 'aigpt server setup' を実行してください。");
        return;
    }

    if !python_path.exists() {
        eprintln!("❌ Python実行ファイルが見つかりません。先に 'aigpt server setup' を実行してください。");
        return;
    }

    // Run the chat script
    let output = OtherCommand::new(&python_path)
        .args(&["chat.py", message])
        .current_dir(&mcp_dir)
        .output()
        .expect("chat.pyの実行に失敗しました");

    if output.status.success() {
        let stdout = String::from_utf8_lossy(&output.stdout);
        let stderr = String::from_utf8_lossy(&output.stderr);

        if !stderr.is_empty() {
            print!("{}", stderr);
        }
        print!("{}", stdout);
    } else {
        eprintln!("❌ チャット実行エラー: {}", String::from_utf8_lossy(&output.stderr));
    }
}
```
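For orientation, a hedged sketch (not in the diff) of the layout `setup()` produces under `ConfigPaths::mcp_dir()`, which `run()` and `chat()` then rely on; the exact file set depends on the project's mcp/ directory, which this diff does not show:

```rust
// <base_dir>/mcp/
// ├── server.py          // entry point launched by run()
// ├── chat.py            // entry point launched by chat()
// ├── requirements.txt   // installed into the venv by setup()
// └── .venv/
//     └── bin/python     // Scripts/python.exe on Windows
```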
src/memory.rs (deleted, 49 lines):

```rust
// src/memory.rs
use chrono::{DateTime, Local, Utc};
use serde::{Deserialize, Serialize};
use std::fs::{self};
//use std::fs::{self, OpenOptions};
use std::io::{BufReader, BufWriter};
use std::path::PathBuf;
use std::{fs::File};
//use std::{env, fs::File};

#[derive(Debug, Serialize, Deserialize)]
pub struct MemoryEntry {
    pub timestamp: DateTime<Utc>,
    pub sender: String,
    pub message: String,
}

pub fn log_message(base_dir: &PathBuf, sender: &str, message: &str) {
    let now_utc = Utc::now();
    let date_str = Local::now().format("%Y-%m-%d").to_string();
    let mut file_path = base_dir.clone();
    file_path.push("memory");
    let _ = fs::create_dir_all(&file_path);
    file_path.push(format!("{}.json", date_str));

    let new_entry = MemoryEntry {
        timestamp: now_utc,
        sender: sender.to_string(),
        message: message.to_string(),
    };

    let mut entries = if file_path.exists() {
        let file = File::open(&file_path).expect("💥 メモリファイルの読み込み失敗");
        let reader = BufReader::new(file);
        serde_json::from_reader(reader).unwrap_or_else(|_| vec![])
    } else {
        vec![]
    };

    entries.push(new_entry);

    let file = File::create(&file_path).expect("💥 メモリファイルの書き込み失敗");
    let writer = BufWriter::new(file);
    serde_json::to_writer_pretty(writer, &entries).expect("💥 JSONの書き込み失敗");
}

// Usage example (inside ask_chat):
// log_message(&config.base_dir, "user", question);
// log_message(&config.base_dir, "ai", &response);
```
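As a hedged illustration (not in the diff, dates and messages invented), one day's `memory/<date>.json` written by `log_message` would look roughly like this; `DateTime<Utc>` serializes to an RFC 3339 string under chrono's serde feature:

```rust
// memory/2025-06-01.json (illustrative content):
// [
//   {
//     "timestamp": "2025-06-01T00:00:00Z",
//     "sender": "user",
//     "message": "こんにちは"
//   },
//   {
//     "timestamp": "2025-06-01T00:00:02Z",
//     "sender": "ai",
//     "message": "おはよう!今日もがんばろう!"
//   }
// ]
```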
src/metrics.rs (deleted, 147 lines):

```rust
// src/metrics.rs
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;

use crate::config::ConfigPaths;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
    pub trust: f32,
    pub intimacy: f32,
    pub energy: f32,
    pub can_send: bool,
    pub last_updated: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Personality {
    pub kind: String,
    pub strength: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
    pub trust: f32,
    pub intimacy: f32,
    pub curiosity: f32,
    pub threshold: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Environment {
    pub luck_today: f32,
    pub luck_history: Vec<f32>,
    pub level: i32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Messaging {
    pub enabled: bool,
    pub schedule_time: Option<String>,
    pub decay_rate: f32,
    pub templates: Vec<String>,
    pub sent_today: bool,               // added
    pub last_sent_date: Option<String>, // added
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
    pub recent_messages: Vec<String>,
    pub long_term_notes: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserData {
    pub personality: Personality,
    pub relationship: Relationship,
    pub environment: Environment,
    pub messaging: Messaging,
    pub last_interaction: DateTime<Utc>,
    pub memory: Memory,
    pub metrics: Metrics,
}

impl Metrics {
    pub fn decay(&mut self) {
        let now = Utc::now();
        let hours = (now - self.last_updated).num_minutes() as f32 / 60.0;
        self.trust = decay_param(self.trust, hours);
        self.intimacy = decay_param(self.intimacy, hours);
        self.energy = decay_param(self.energy, hours);
        self.can_send = self.trust >= 0.5 && self.intimacy >= 0.5 && self.energy >= 0.5;
        self.last_updated = now;
    }
}

pub fn load_user_data(path: &Path) -> UserData {
    let config = ConfigPaths::new();
    let example_path = Path::new("example.json");
    config.ensure_file_exists("json", example_path);

    if !path.exists() {
        return UserData {
            personality: Personality {
                kind: "positive".into(),
                strength: 0.8,
            },
            relationship: Relationship {
                trust: 0.2,
                intimacy: 0.6,
                curiosity: 0.5,
                threshold: 1.5,
            },
            environment: Environment {
                luck_today: 0.9,
                luck_history: vec![0.9, 0.9, 0.9],
                level: 1,
            },
            messaging: Messaging {
                enabled: true,
                schedule_time: Some("08:00".to_string()),
                decay_rate: 0.1,
                templates: vec![
                    "おはよう!今日もがんばろう!".to_string(),
                    "ねえ、話したいことがあるの。".to_string(),
                ],
                sent_today: false,
                last_sent_date: None,
            },
            last_interaction: Utc::now(),
            memory: Memory {
                recent_messages: vec![],
                long_term_notes: vec![],
            },
            metrics: Metrics {
                trust: 0.5,
                intimacy: 0.5,
                energy: 0.5,
                can_send: true,
                last_updated: Utc::now(),
            },
        };
    }

    let content = fs::read_to_string(path).expect("user.json の読み込みに失敗しました");
    serde_json::from_str(&content).expect("user.json のパースに失敗しました")
}

pub fn save_user_data(path: &Path, data: &UserData) {
    let content = serde_json::to_string_pretty(data).expect("user.json のシリアライズ失敗");
    fs::write(path, content).expect("user.json の書き込みに失敗しました");
}

pub fn update_metrics_decay() -> Metrics {
    let config = ConfigPaths::new();
    let path = config.base_dir.join("user.json");
    let mut data = load_user_data(&path);
    data.metrics.decay();
    save_user_data(&path, &data);
    data.metrics
}

fn decay_param(value: f32, hours: f32) -> f32 {
    let decay_rate = 0.05;
    (value * (1.0f32 - decay_rate).powf(hours)).clamp(0.0, 1.0)
}
```
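`decay_param` is plain exponential decay at 5% per hour; a hedged worked example (not in the diff):

```rust
// After 24 idle hours, a metric of 0.8 decays to
//   0.8 * 0.95^24 ≈ 0.8 * 0.292 ≈ 0.234
let decayed = 0.8_f32 * (1.0_f32 - 0.05).powf(24.0);
assert!((decayed - 0.234).abs() < 0.01);
```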
src/model.rs (deleted, 72 lines):

```rust
//src/model.rs
use rusqlite::{params, Connection, Result as SqlResult};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct AiSystem {
    pub personality: Personality,
    pub relationship: Relationship,
    pub environment: Environment,
    pub messaging: Messaging,
}

impl AiSystem {
    pub fn save_to_db(&self, conn: &Connection) -> SqlResult<()> {
        conn.execute(
            "CREATE TABLE IF NOT EXISTS ai_state (id INTEGER PRIMARY KEY, json TEXT)",
            [],
        )?;

        let json_data = serde_json::to_string(self).map_err(|e| {
            rusqlite::Error::ToSqlConversionFailure(Box::new(e))
        })?;

        conn.execute(
            "INSERT OR REPLACE INTO ai_state (id, json) VALUES (?1, ?2)",
            params![1, json_data],
        )?;

        Ok(())
    }

    pub fn load_from_db(conn: &Connection) -> SqlResult<Self> {
        let mut stmt = conn.prepare("SELECT json FROM ai_state WHERE id = ?1")?;
        let json: String = stmt.query_row(params![1], |row| row.get(0))?;

        // Convert the serde_json error via map_err here as well
        let system: AiSystem = serde_json::from_str(&json).map_err(|e| {
            rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(e))
        })?;

        Ok(system)
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Personality {
    pub kind: String,  // e.g., "positive", "negative", "neutral"
    pub strength: f32, // 0.0 - 1.0
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Relationship {
    pub trust: f32,     // 0.0 - 1.0
    pub intimacy: f32,  // 0.0 - 1.0
    pub curiosity: f32, // 0.0 - 1.0
    pub threshold: f32, // if sum > threshold, allow messaging
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Environment {
    pub luck_today: f32,        // 0.1 - 1.0
    pub luck_history: Vec<f32>, // last 3 values
    pub level: i32,             // current mental strength level
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Messaging {
    pub enabled: bool,
    pub schedule_time: Option<String>, // e.g., "08:00"
    pub decay_rate: f32,               // how quickly emotion fades (0.0 - 1.0)
    pub templates: Vec<String>,        // message template variations
}
```
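A hedged usage sketch (not in the diff) of the save/load pair; the `user.db` file name matches what `ConfigPaths::data_file("db")` resolves to:

```rust
// Hypothetical helper, not in the diff: persist and restore AiSystem
// through the single-row ai_state table created by save_to_db().
use rusqlite::Connection;

fn roundtrip(ai: &AiSystem) -> rusqlite::Result<AiSystem> {
    let conn = Connection::open("user.db")?;
    ai.save_to_db(&conn)?;
    AiSystem::load_from_db(&conn)
}
```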
src/utils.rs (deleted, 13 lines):

```rust
// src/utils.rs
use std::fs;
use crate::model::AiSystem;

pub fn load_config(path: &str) -> AiSystem {
    let data = fs::read_to_string(path).expect("JSON読み込み失敗");
    serde_json::from_str(&data).expect("JSONパース失敗")
}

pub fn save_config(path: &str, ai: &AiSystem) {
    let json = serde_json::to_string_pretty(&ai).expect("JSONシリアライズ失敗");
    fs::write(path, json).expect("JSON保存失敗");
}
```