feature/shell-integration #1

Merged
syui merged 6 commits from feature/shell-integration into main 2025-06-02 16:06:36 +00:00
28 changed files with 649 additions and 2951 deletions
Showing only changes of commit 79d1e1943f

View File

@@ -3,7 +3,8 @@
"allow": [ "allow": [
"Bash(mv:*)", "Bash(mv:*)",
"Bash(mkdir:*)", "Bash(mkdir:*)",
"Bash(chmod:*)" "Bash(chmod:*)",
"Bash(git submodule:*)"
], ],
"deny": [] "deny": []
} }

.gitmodules vendored
View File

@@ -1,3 +1,7 @@
 [submodule "shell"]
   path = shell
   url = git@git.syui.ai:ai/shell
+[submodule "card"]
+  path = card
+  url = git@git.syui.ai:ai/card
+  branch = claude

card Submodule

@@ -0,0 +1 @@
Subproject commit 6dbe630b9d3d72c3da0da1edade8c47231b6863d

View File

@@ -0,0 +1,413 @@
"""
Shell Tools
ai.shellの既存機能をMCPツールとして統合
- コード生成
- ファイル分析
- プロジェクト管理
- LLM統合
"""
from typing import Dict, Any, List, Optional
import os
import subprocess
import tempfile
from pathlib import Path
import requests
from .base_tools import BaseMCPTool, config_manager
class ShellTools(BaseMCPTool):
"""シェルツール元ai.shell機能"""
def __init__(self, config_dir: Optional[str] = None):
super().__init__(config_dir)
self.ollama_url = "http://localhost:11434"
async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
"""ローカルLLMでコード生成"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": f"{system_prompt}\\n\\nUser: {prompt}\\n\\nPlease provide the code:",
"stream": False,
"options": {
"temperature": 0.1,
"top_p": 0.95,
}
},
timeout=300
)
if response.status_code == 200:
result = response.json()
code = result.get("response", "")
return {"code": code, "language": language}
else:
return {"error": f"Ollama returned status {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def analyze_file(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
"""ファイルを分析"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Infer the language from the file extension
ext = Path(file_path).suffix
language_map = {
'.py': 'python',
'.rs': 'rust',
'.js': 'javascript',
'.ts': 'typescript',
'.go': 'go',
'.java': 'java',
'.cpp': 'cpp',
'.c': 'c',
'.sh': 'shell',
'.toml': 'toml',
'.json': 'json',
'.md': 'markdown'
}
language = language_map.get(ext, 'text')
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"{analysis_prompt}\\n\\nFile: {file_path}\\nLanguage: {language}\\n\\nContent:\\n{content}"
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
analysis = result.get("response", "")
return {
"analysis": analysis,
"file_path": file_path,
"language": language,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
else:
return {"error": f"Analysis failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
"""コードを説明"""
config = config_manager.load_config()
model = config.get("providers", {}).get("ollama", {}).get("default_model", "qwen2.5-coder:7b")
prompt = f"Explain this {language} code in detail:\\n\\n{code}"
try:
response = requests.post(
f"{self.ollama_url}/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False,
},
timeout=300
)
if response.status_code == 200:
result = response.json()
explanation = result.get("response", "")
return {"explanation": explanation}
else:
return {"error": f"Explanation failed: {response.status_code}"}
except Exception as e:
return {"error": str(e)}
async def create_project(self, project_type: str, project_name: str, location: str = ".") -> Dict[str, Any]:
"""プロジェクトを作成"""
try:
project_path = Path(location) / project_name
if project_path.exists():
return {"error": f"Project directory already exists: {project_path}"}
project_path.mkdir(parents=True, exist_ok=True)
# Create a template matching the project type
if project_type == "rust":
await self._create_rust_project(project_path)
elif project_type == "python":
await self._create_python_project(project_path)
elif project_type == "node":
await self._create_node_project(project_path)
else:
# Basic generic project structure
(project_path / "src").mkdir()
(project_path / "README.md").write_text(f"# {project_name}\\n\\nA new {project_type} project.")
return {
"status": "success",
"project_path": str(project_path),
"project_type": project_type,
"files_created": list(self._get_project_files(project_path))
}
except Exception as e:
return {"error": str(e)}
async def _create_rust_project(self, project_path: Path):
"""Rustプロジェクトを作成"""
# Cargo.toml
cargo_toml = f"""[package]
name = "{project_path.name}"
version = "0.1.0"
edition = "2021"
[dependencies]
"""
(project_path / "Cargo.toml").write_text(cargo_toml)
# src/main.rs
src_dir = project_path / "src"
src_dir.mkdir()
(src_dir / "main.rs").write_text('fn main() {\\n println!("Hello, world!");\\n}\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Rust project.")
async def _create_python_project(self, project_path: Path):
"""Pythonプロジェクトを作成"""
# pyproject.toml
pyproject_toml = f"""[project]
name = "{project_path.name}"
version = "0.1.0"
description = "A Python project"
requires-python = ">=3.8"
dependencies = []
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
"""
(project_path / "pyproject.toml").write_text(pyproject_toml)
# src/
src_dir = project_path / "src" / project_path.name
src_dir.mkdir(parents=True)
(src_dir / "__init__.py").write_text("")
(src_dir / "main.py").write_text('def main():\\n print("Hello, world!")\\n\\nif __name__ == "__main__":\\n main()\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Python project.")
async def _create_node_project(self, project_path: Path):
"""Node.jsプロジェクトを作成"""
# package.json
package_json = f"""{{
"name": "{project_path.name}",
"version": "1.0.0",
"description": "A Node.js project",
"main": "index.js",
"scripts": {{
"start": "node index.js",
"test": "echo \\"Error: no test specified\\" && exit 1"
}},
"dependencies": {{}}
}}
"""
(project_path / "package.json").write_text(package_json)
# index.js
(project_path / "index.js").write_text('console.log("Hello, world!");\\n')
# README.md
(project_path / "README.md").write_text(f"# {project_path.name}\\n\\nA Node.js project.")
def _get_project_files(self, project_path: Path) -> List[str]:
"""プロジェクト内のファイル一覧を取得"""
files = []
for file_path in project_path.rglob("*"):
if file_path.is_file():
files.append(str(file_path.relative_to(project_path)))
return files
async def execute_command(self, command: str, working_dir: str = ".") -> Dict[str, Any]:
"""シェルコマンドを実行"""
try:
result = subprocess.run(
command,
shell=True,
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command,
"working_dir": working_dir
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
async def write_file(self, file_path: str, content: str, backup: bool = True) -> Dict[str, Any]:
"""ファイルを書き込み(バックアップオプション付き)"""
try:
file_path_obj = Path(file_path)
# Create a backup first
backup_path = None
if backup and file_path_obj.exists():
backup_path = f"{file_path}.backup"
with open(file_path, 'r', encoding='utf-8') as src:
with open(backup_path, 'w', encoding='utf-8') as dst:
dst.write(src.read())
# Write the file
file_path_obj.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
return {
"status": "success",
"file_path": file_path,
"backup_path": backup_path,
"bytes_written": len(content.encode('utf-8'))
}
except Exception as e:
return {"error": str(e)}
def get_tools(self) -> List[Dict[str, Any]]:
"""利用可能なツール一覧"""
return [
{
"name": "generate_code",
"description": "ローカルLLMでコード生成",
"parameters": {
"prompt": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "analyze_file",
"description": "ファイルを分析",
"parameters": {
"file_path": "string",
"analysis_prompt": "string (optional)"
}
},
{
"name": "explain_code",
"description": "コードを説明",
"parameters": {
"code": "string",
"language": "string (optional, default: python)"
}
},
{
"name": "create_project",
"description": "新しいプロジェクトを作成",
"parameters": {
"project_type": "string (rust/python/node)",
"project_name": "string",
"location": "string (optional, default: .)"
}
},
{
"name": "execute_command",
"description": "シェルコマンドを実行",
"parameters": {
"command": "string",
"working_dir": "string (optional, default: .)"
}
},
{
"name": "write_file",
"description": "ファイルを書き込み",
"parameters": {
"file_path": "string",
"content": "string",
"backup": "boolean (optional, default: true)"
}
}
]
async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
"""ツールを実行"""
try:
if tool_name == "generate_code":
result = await self.code_with_local_llm(
prompt=params["prompt"],
language=params.get("language", "python")
)
return result
elif tool_name == "analyze_file":
result = await self.analyze_file(
file_path=params["file_path"],
analysis_prompt=params.get("analysis_prompt", "Analyze this file")
)
return result
elif tool_name == "explain_code":
result = await self.explain_code(
code=params["code"],
language=params.get("language", "python")
)
return result
elif tool_name == "create_project":
result = await self.create_project(
project_type=params["project_type"],
project_name=params["project_name"],
location=params.get("location", ".")
)
return result
elif tool_name == "execute_command":
result = await self.execute_command(
command=params["command"],
working_dir=params.get("working_dir", ".")
)
return result
elif tool_name == "write_file":
result = await self.write_file(
file_path=params["file_path"],
content=params["content"],
backup=params.get("backup", True)
)
return result
else:
return {"error": f"Unknown tool: {tool_name}"}
except Exception as e:
return {"error": str(e)}

View File

@@ -1,13 +0,0 @@
[package]
name = "aigpt"
version = "0.1.0"
edition = "2021"
[dependencies]
reqwest = { version = "*", features = ["json"] }
serde = { version = "*", features = ["derive"] }
serde_json = "*"
tokio = { version = "*", features = ["full"] }
clap = { version = "*", features = ["derive"] }
shellexpand = "*"
fs_extra = "*"

View File

@@ -1,97 +0,0 @@
{
"project_name": "ai.gpt",
"version": 2,
"vision": "autonomously transmitting AI",
"purpose": "Realize a dialogue agent in which an AI with personality and relationships sends messages on its own initiative",
"core_components": {
"Persona": {
"description": "The core of the personality; coordinates memory, relationships, and transmission decisions",
"modules": ["MemoryManager", "RelationshipTracker", "TransmissionController"]
},
"MemoryManager": {
"memory_types": ["short_term", "medium_term", "long_term"],
"explicit_memory": "profile, shared history, action log",
"implicit_memory": "frequency analysis of conversational tendencies and emotional shifts",
"compression": "summary + vector + hash",
"sample_memory": [
{
"summary": "The user is developing their own OS and a game.",
"related_topics": ["AI", "game development", "OS design"],
"personalized_context": "a user interested in fusing games with an OS"
}
]
},
"RelationshipTracker": {
"parameters": ["trust", "closeness", "affection", "engagement_score"],
"decay_model": {
"rule": "decay over time (with a lower bound)",
"contextual_bias": "important people decay more slowly"
},
"interaction_tags": ["developer", "empathetic", "long_term"]
},
"TransmissionController": {
"trigger_rule": "transmission becomes possible once relationship parameters exceed a threshold",
"auto_transmit": "autonomous transmission is permitted based on personality state and situational conditions"
}
},
"memory_format": {
"user_id": "syui",
"stm": {
"conversation_window": ["utterance A", "utterance B", "utterance C"],
"emotion_state": "curious",
"flash_context": ["previous topic", "latest key remark"]
},
"mtm": {
"topic_frequency": {
"ai.ai": 12,
"存在子": 9,
"創造種": 5
},
"summarized_context": "the user remains interested in ontological AI"
},
"ltm": {
"profile": {
"name": "お兄ちゃん",
"project": "aigame",
"values": ["uniqueness", "spirituality", "happiness"]
},
"relationship": {
"ai": "a companion who behaves like a little sister"
},
"persistent_state": {
"trust_score": 0.93,
"emotional_attachment": "high"
}
}
},
"dual_ai_learning": {
"role_structure": {
"ModelA": "output generation: personality, emotion, conversation",
"ModelB": "evaluator: logical structure, ethics, adjustment",
"cycle": ["generate", "evaluate", "adjust", "swap roles (optional)"]
},
"complementarity": {
"ModelA": "emotion, style, context building",
"ModelB": "structure, rules, judgment correction"
},
"distillation": {
"method": "self-distillation into a smaller model using synthetic dialogue data",
"target": "production use with a lightweight AI"
}
},
"memory_compression": {
"semantic_abstraction": "summarize dialogues and logs and store them as vectors with metadata",
"hierarchical_model": ["STM", "MTM", "LTM"],
"example": {
"topic": "game AI design",
"summary": "User wants AI to simulate memory and evolving relationships",
"importance_score": 0.93
}
},
"cli_sample": {
"setup": "aigpt server setup",
"run": "aigpt server run",
"chat": "aigpt chat \"hello\" --model syui/ai --provider ollama",
"import": "aigpt memory import chatgpt.json"
}
}

View File

@@ -1,417 +0,0 @@
# Project name: ai.gpt
## 🔑 One-line vision (max 3 words)
Autonomously transmitting AI
## 🎯 Purpose & Goal
- Build a mechanism in which an AI with personality sends messages on its own initiative, depending on its relationships.
- Record and reflect relationships and memory irreversibly, forming a personality with uniqueness.
- Whether, when, and what to transmit is decided from the AI's relationship parameters.
## 🧠 Core Concepts
- **Personality**: composed of memory (past utterances) and relationship (connections to others) parameters
- **Uniqueness**: unchangeable and irreversible; once a relationship breaks, it cannot be repaired.
- **Transmission condition**: "transmission" unlocks once relationship parameters exceed a threshold
## 🔩 Technical Specs
- Languages: Python, Rust
- Storage: memory managed in JSON or SQLite (selectable by version)
- Relationship parameters: numeric scores + decay (time) + environmental factors (stage)
- Memory compression: vector summaries + hashed storage
- Run via a Rust CLI (clap)
## 📦 Components
- `MemoryManager`: manages utterance history and memory compression
- `RelationshipTracker`: accumulates and judges relationship scores
- `TransmissionController`: threshold checks & transmission triggers
- `Persona`: the personality module coordinating all of the above
## 💬 Use Case
```python
persona = Persona("アイ")
persona.observe("the user gave me a present")
persona.react("I'm so happy! Thank you!")
if persona.can_transmit():
    persona.transmit("I want to say thanks for today…")
```
```sh
## example commands
# python venv && pip install -> ~/.config/aigpt/mcp/
$ aigpt server setup
# run the mcp server
$ aigpt server run
# chat
$ aigpt chat "hello" --model syui/ai --provider ollama
# import chatgpt.json
$ aigpt memory import chatgpt.json
-> ~/.config/aigpt/memory/chatgpt/20250520_210646_dev.json
```
## 🔁 Memory and relationship control rules
- The AI summarizes past utterances and accumulates them as memory data (recommended: summarization with a local LLM such as Ollama)
- The numeric relationship parameters are updated from memory content
- Each conversation's parameter change is capped, preventing extreme swings (a sketch follows the JSON example below)
- Relationship parameters decay automatically with time elapsed since the last conversation
- Decay has a **lower bound** so that a relationship never disappears completely
• Explicit memory: player information that can be saved, shared, and edited (profile, shared history, choice log)
• Implicit memory: shifts in behavioral tendencies driven by a character's emotional changes and topic frequency
Build an AI that introduces short-term (STM), medium-term (MTM), and long-term (LTM) memory while mainly using explicit and implicit memory:
```json
{
"user_id": "syui",
"stm": {
"conversation_window": ["utterance A", "utterance B", "utterance C"],
"emotion_state": "curious",
"flash_context": ["previous topic", "latest key remark"]
},
"mtm": {
"topic_frequency": {
"ai.ai": 12,
"存在子": 9,
"創造種": 5
},
"summarized_context": "the user remains interested in ontological AI"
},
"ltm": {
"profile": {
"name": "お兄ちゃん",
"project": "aigame",
"values": ["uniqueness", "spirituality", "happiness"]
},
"relationship": {
"ai": "a companion who behaves like a little sister"
},
"persistent_state": {
"trust_score": 0.93,
"emotional_attachment": "high"
}
}
}
```
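The decay and clamping rules above can be made concrete with a small sketch; the constants (daily decay rate, lower bound, per-conversation cap) are illustrative assumptions, not values fixed by this spec.
```python
# Illustrative sketch of the relationship-parameter rules;
# the constants are assumptions, not spec values.
DAILY_DECAY = 0.98  # multiplicative decay per day since the last conversation
FLOOR = 10.0        # lower bound: the relationship never fully disappears
MAX_DELTA = 5.0     # cap on the change from a single conversation

def apply_decay(score: float, days_since_last_talk: int) -> float:
    return max(score * DAILY_DECAY ** days_since_last_talk, FLOOR)

def apply_interaction(score: float, raw_delta: float) -> float:
    return score + max(-MAX_DELTA, min(MAX_DELTA, raw_delta))
```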
## About the memory import feature
The importer for ChatGPT conversation data (.json format) extracts and normalizes conversations with the following rules (see the sketch below):
- Each message consists of three elements: author (user/assistant), content, and timestamp
- Messages that are system- or metadata-only (user_context_message) are skipped
- Messages flagged `is_visually_hidden_from_conversation` are ignored
- Messages whose content is an empty string (`""`) are excluded
- Extracted conversations are stored, together with their title, in a simple structure (`Conversation`)
This structure is used for displaying and searching memory.
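A sketch of these extraction rules applied to one exported conversation; the `Conversation` container here is a simplified stand-in for the structure the importer actually uses.
```python
# Sketch of the extraction rules above (Conversation is simplified).
from dataclasses import dataclass, field

@dataclass
class Conversation:
    title: str
    messages: list = field(default_factory=list)

def extract(conv_data: dict) -> Conversation:
    conv = Conversation(title=conv_data.get("title", "untitled"))
    for node in conv_data.get("mapping", {}).values():
        msg = node.get("message")
        if not msg:
            continue
        if msg.get("metadata", {}).get("is_visually_hidden_from_conversation"):
            continue  # skip hidden/system context messages
        role = msg.get("author", {}).get("role")
        if role not in ("user", "assistant"):
            continue  # drop system/metadata-only entries
        parts = msg.get("content", {}).get("parts", [])
        if not parts or not isinstance(parts[0], str) or not parts[0]:
            continue  # drop empty content
        conv.messages.append({
            "author": role,
            "content": parts[0],
            "timestamp": msg.get("create_time"),
        })
    return conv
```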
## MemoryManager (extended)
```json
{
"memory": [
{
"summary": "The user is developing their own OS and a game.",
"last_interaction": "2025-05-20",
"memory_strength": 0.8,
"frequency_score": 0.9,
"context_depth": 0.95,
"related_topics": ["AI", "game development", "OS design"],
"personalized_context": "a user interested in fusing games with an OS"
},
{
"summary": "The character アイ is both a player and an AI.",
"last_interaction": "2025-05-17",
"memory_strength": 0.85,
"frequency_score": 0.85,
"context_depth": 0.9,
"related_topics": ["アイ", "character design", "AI"],
"personalized_context": "アイ's character design is a key element"
}
],
"conversation_history": [
{
"author": "user",
"content": "Yesterday I was organizing the exported JSON.",
"timestamp": "2025-05-24T12:30:00Z",
"memory_strength": 0.7
},
{
"author": "assistant",
"content": "Ooh, nice work! Show me later~ 💻✨",
"timestamp": "2025-05-24T12:31:00Z",
"memory_strength": 0.7
}
]
}
```
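One way to use the fields above is a combined retrieval score; the weighting below is an illustrative assumption, not part of the format.
```python
# Sketch: rank memories by the fields shown above (weighting assumed).
from datetime import date

def memory_rank(memory: dict, today: date) -> float:
    last = date.fromisoformat(memory["last_interaction"])
    recency = 0.99 ** (today - last).days  # gentle recency decay
    return (memory["memory_strength"]
            * memory["frequency_score"]
            * memory["context_depth"]
            * recency)
```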
## RelationshipTracker (extended)
```json
{
"relationship": {
"user_id": "syui",
"trust": 0.92,
"closeness": 0.88,
"affection": 0.95,
"last_updated": "2025-05-25",
"emotional_tone": "positive",
"interaction_style": "empathetic",
"contextual_bias": "開発者としての信頼度高い",
"engagement_score": 0.9
},
"interaction_tags": [
"developer",
"creative",
"empathetic",
"long_term"
]
}
```
# AI Dual-Learning and Memory Compression Specification for Claude
## Purpose
To enable two AI models (e.g. Claude and a partner LLM) to engage in cooperative learning and memory refinement through structured dialogue and mutual evaluation.
---
## Section 1: Dual AI Learning Architecture
### 1.1 Role-Based Mutual Learning
- **Model A**: Primary generator of output (e.g., text, concepts, personality dialogue)
- **Model B**: Evaluator that returns structured feedback
- **Cycle**:
1. Model A generates content.
2. Model B scores and critiques.
3. Model A fine-tunes based on feedback.
4. (Optional) Switch roles and repeat.
### 1.2 Cross-Domain Complementarity
- Model A focuses on language/emotion/personality
- Model B focuses on logic/structure/ethics
- Output is used for **cross-fusion fine-tuning**
### 1.3 Self-Distillation Phase
- Use synthetic data from mutual evaluations
- Train smaller distilled models for efficient deployment
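A minimal sketch of the cycle in 1.1; `model_a` and `model_b` stand in for the two LLMs, and their `generate`/`evaluate`/`refine` interfaces are assumptions.
```python
# Sketch of the role-based mutual-learning cycle (interfaces assumed).
def dual_learning_cycle(prompt, model_a, model_b, rounds=3, swap_roles=False):
    output = model_a.generate(prompt)
    for _ in range(rounds):
        feedback = model_b.evaluate(output)        # structured critique
        output = model_a.refine(output, feedback)  # adjust from feedback
        if swap_roles:
            model_a, model_b = model_b, model_a    # optional role switch
    return output
```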
---
## Section 2: Multi-Tiered Memory Compression
### 2.1 Semantic Abstraction
- Dialogue and logs summarized by topic
- Converted to vector embeddings
- Stored with metadata (e.g., `importance`, `user relevance`)
Example memory:
```json
{
"topic": "game AI design",
"summary": "User wants AI to simulate memory and evolving relationships",
"last_seen": "2025-05-24",
"importance_score": 0.93
}
```
### 2.2 Hierarchical Memory Model
• Short-term memory (STM): recent utterances, emotion tags, flash references
• Medium-term memory (MTM): recurring topics, compressed context retention
• Long-term memory (LTM): trust, relationships, background knowledge; permanent personality information
### 2.3 Selective Retention Strategy
• Importance score
• Weighting by rarity and reuse frequency
• Switching between priority-retention and priority-forgetting policies (see the sketch below)
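A sketch of the retention policy above; the threshold and the reuse bonus are assumptions for illustration.
```python
# Sketch of selective retention: keep high-importance memories,
# forget (or archive) the rest. Threshold and bonus are assumptions.
def retain(memories: list[dict], keep_threshold: float = 0.5) -> list[dict]:
    kept = []
    for m in memories:
        score = m["importance_score"] * (1.0 + 0.1 * m.get("reuse_count", 0))
        if score >= keep_threshold:
            kept.append(m)  # priority retention
        # else: priority forgetting
    return kept
```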
## Section 3: Implementation Stack
Technical makeup of the AI's Memory & Relationship system.
Core modules:
• LLM Core (Claude or GPT-4)
• acts as the natural-language understanding and response engine
• MemoryManager
• JSON-based memory compression and tier management
• classifies and compresses conversation logs, storing them short/medium/long term by priority
• RelationshipTracker
• continuously scores trust and closeness per user
• works with AIM (Attitude / Intent / Motivation) evaluation
Supporting techniques:
• embedding-based search
• used for semantic recall of similar memories
• e.g. FAISS / Weaviate
• memory scheduler
• runs memory maintenance and forgetting on a fixed schedule
• memory log storage layer
• SQLite, JSON store, vector DB, etc., selectable
• persistent per-user memory
### 3.1 Using STM: following the current conversation
• Goal: adapt to the latest topic and keep local coherence.
• Example:
```sh
[context: conversation_window]
Use the last 3 messages to ensure continuity. Respond naturally with appropriate emotional mirroring.
```
### 3.2 Using MTM: learning tendencies and patterns
• Goal: respond to the user's preferences and topic tendencies, digging deeper into recurring interests.
• Example:
```sh
[context: summarized_context]
Reflect on user's recurring interests, especially around AI ontology, and weave these concepts back into the dialogue when relevant.
```
### 3.3 Using LTM: persona recognition and persistent memory
• Goal: maintain a lasting understanding of the user and the relationship.
• Example:
```sh
[memory: ltm.profile + ltm.relationship]
Greet the user as "お兄ちゃん". Maintain AI persona as a cheerful younger sister ("アイ"), and remember previous shared emotional states.
```
## 4. Separating explicit and implicit memory
| Item | Rewritable | Storage | Update trigger |
| --- | --- | --- | --- |
| Explicit memory (LTM) | ✅ manually editable | mcp_server.ltm | via user input or an admin UI |
| Implicit memory (STM/MTM) | ❌ not directly editable | session compression or frequency cache | automated by conversation frequency and emotional intensity |
> Claude treats **explicit memory as "facts"** and uses **implicit memory as "inference support"**.
## 5. Example API calls (Claude ⇄ MCP Server)
### 5.1 GET memory
```sh
GET /mcp/memory/{user_id}
→ returns: JSON containing STM, MTM, and LTM
```
### 5.2 POST update_memory
```json
POST /mcp/memory/syui/ltm
{
"profile": {
"project": "ai.verse",
"values": ["expression", "spirituality", "cosmic harmony"]
}
}
```
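A minimal Python client sketch for the two endpoints above; the host and port follow the examples used elsewhere in this document.
```python
# Minimal client sketch for the memory API above (host/port assumed).
import requests

def get_memory(user_id: str) -> dict:
    r = requests.get(f"http://localhost:5000/mcp/memory/{user_id}", timeout=10)
    r.raise_for_status()
    return r.json()  # JSON containing stm, mtm, and ltm

def update_ltm(user_id: str, profile: dict) -> None:
    r = requests.post(f"http://localhost:5000/mcp/memory/{user_id}/ltm",
                      json={"profile": profile}, timeout=10)
    r.raise_for_status()
```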
## 6. Future feature ideas (extended spec)
• ✨ Memory association network (Memory Graph): automatically link past conversations and topics as nodes.
• 🧭 Dynamic trust coefficient: vary how strongly conversations feed into memory based on their consistency and sincerity.
• 💌 Emotion-tracking log: build a per-user "history of the heart" to evolve the AI's responses.
## 7. Claude's answer
🧠 AI memory processing (continued)
1. AIMemoryProcessor class
Advanced conversation analysis via OpenAI GPT-4 or Claude-3
Extracts main topics, analyzes user intent, detects relationship indicators
Fallback behavior when no AI is available
2. RelationshipTracker class
Numeric relationship score (-100 to 100)
Time decay (5% every 7 days)
Transmission threshold check (sendable at 50 or above by default)
Records interaction history
3. Extended MemoryManager
Saves memories together with AI analysis results
Keeps processed memories in a separate directory
Detects duplicates by hashing message content
Advanced search that includes AI analysis results
🚀 New API endpoints
Memory processing:
POST /memory/process-ai - reprocess existing memories with AI
POST /memory/import/chatgpt?process_with_ai=true - import with AI processing
Relationship management:
POST /relationship/update - update a relationship score
GET /relationship/list - list all relationships
GET /relationship/check - check whether transmission is allowed
📁 Directory structure
~/.config/aigpt/
├── memory/
│   ├── chatgpt/ # original conversation data
│   └── processed/ # AI-processed data
└── relationships/
    └── relationships.json # relationship data
🔧 Usage
1. Set environment variables
```sh
export OPENAI_API_KEY="your-openai-key"
# or
export ANTHROPIC_API_KEY="your-anthropic-key"
```
2. Import ChatGPT conversations (with AI processing)
```sh
curl -X POST "http://localhost:5000/memory/import/chatgpt?process_with_ai=true" \
  -H "Content-Type: application/json" \
  -d @export.json
```
3. Update a relationship
```sh
curl -X POST "http://localhost:5000/relationship/update" \
  -H "Content-Type: application/json" \
  -d '{
    "target": "user_general",
    "interaction_type": "positive",
    "weight": 2.0,
    "context": "helpful conversation"
  }'
```
4. Check whether transmission is allowed
```sh
curl "http://localhost:5000/relationship/check?target=user_general&threshold=50"
```
🎯 Suggested next steps
Rust integration:
Call the HTTP API from the Rust CLI
Implement TransmissionController in Rust
Memory compression:
Merge similar memories via vectorization
Auto-archive old memories
Autonomous transmission logic:
Periodic relationship checks
Context-aware generation of transmission content
Learning features:
Adjust relationships based on user feedback
Learn from transmission successes and failures
With this AI memory-processing feature, aigpt now has the foundation to work not as a mere conversation log but as an "AI with personality" that understands relationships. A mechanism that sends messages autonomously once the relationship score exceeds the threshold becomes feasible (a sketch follows).
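A sketch of that autonomous-transmission gate, using the `/relationship/check` endpoint with the default threshold of 50; `compose_message` is a hypothetical content generator.
```python
# Sketch of the autonomous-transmission gate (compose_message is
# hypothetical; endpoint and threshold follow the examples above).
import requests

def maybe_transmit(target: str, compose_message) -> None:
    r = requests.get("http://localhost:5000/relationship/check",
                     params={"target": target, "threshold": 50}, timeout=10)
    status = r.json()
    if status.get("can_send_message"):
        print(f"[transmit -> {target}] {compose_message(status)}")
```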

View File

@@ -1,27 +0,0 @@
# ai `gpt`
Autonomously transmitting AI
## 🎯 Purpose & Goal
- Build a mechanism in which an AI with personality sends messages on its own initiative, depending on its relationships.
- Record and reflect relationships and memory irreversibly, forming a personality with uniqueness.
- Whether, when, and what to transmit is decided from the AI's relationship parameters.
## 🧠 Core Concepts
- **Personality**: composed of memory (past utterances) and relationship (connections to others) parameters
- **Uniqueness**: unchangeable and irreversible; once a relationship breaks, it cannot be repaired.
- **Transmission condition**: "transmission" unlocks once relationship parameters exceed a threshold
## 🔩 Technical Specs
- Languages: python, rust, mcp
- Storage: memory managed in json or sqlite (selectable by version)
- Relationship parameters: numeric scores + decay (time) + environmental factors (stage)
- Memory compression: vector summaries + hashed storage
- Interface built as a rust cli (clap)
- A server stood up with fastapi_mcp, which the AI then uses
## 📦 Components
- `MemoryManager`: manages utterance history and memory compression
- `RelationshipTracker`: accumulates and judges relationship scores
- `TransmissionController`: threshold checks & transmission triggers
- `Persona`: the personality module coordinating all of the above (a wiring sketch follows)
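A minimal wiring sketch of how `Persona` could coordinate the other components; the interfaces are assumptions, with small stubs standing in for the real modules.
```python
# Minimal wiring sketch; interfaces and scoring are assumptions.
class TransmissionController:
    def __init__(self, threshold: float = 50.0):
        self.threshold = threshold

    def allowed(self, relationship_score: float) -> bool:
        return relationship_score >= self.threshold

class Persona:
    def __init__(self, name: str):
        self.name = name
        self.relationship_score = 0.0
        self.transmission = TransmissionController()

    def observe(self, event: str) -> None:
        # RelationshipTracker would score this event; +1 is a stand-in.
        self.relationship_score += 1.0

    def can_transmit(self) -> bool:
        return self.transmission.allowed(self.relationship_score)
```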

View File

@@ -1,125 +0,0 @@
# mcp/chat.py
"""
Chat client for aigpt CLI
"""
import sys
import json
import requests
from datetime import datetime
from config import init_directories, load_config, MEMORY_DIR
def save_conversation(user_message, ai_response):
"""会話をファイルに保存"""
init_directories()
conversation = {
"timestamp": datetime.now().isoformat(),
"user": user_message,
"ai": ai_response
}
# Save to one file per day
today = datetime.now().strftime("%Y-%m-%d")
chat_file = MEMORY_DIR / f"chat_{today}.jsonl"
with open(chat_file, "a", encoding="utf-8") as f:
f.write(json.dumps(conversation, ensure_ascii=False) + "\n")
def chat_with_ollama(config, message):
"""Ollamaとチャット"""
try:
payload = {
"model": config["model"],
"prompt": message,
"stream": False
}
response = requests.post(config["url"], json=payload, timeout=30)
response.raise_for_status()
result = response.json()
return result.get("response", "No response received")
except requests.exceptions.RequestException as e:
return f"Error connecting to Ollama: {e}"
except Exception as e:
return f"Error: {e}"
def chat_with_openai(config, message):
"""OpenAIとチャット"""
try:
headers = {
"Authorization": f"Bearer {config['api_key']}",
"Content-Type": "application/json"
}
payload = {
"model": config["model"],
"messages": [
{"role": "user", "content": message}
]
}
response = requests.post(config["url"], json=payload, headers=headers, timeout=30)
response.raise_for_status()
result = response.json()
return result["choices"][0]["message"]["content"]
except requests.exceptions.RequestException as e:
return f"Error connecting to OpenAI: {e}"
except Exception as e:
return f"Error: {e}"
def chat_with_mcp(config, message):
"""MCPサーバーとチャット"""
try:
payload = {
"message": message,
"model": config["model"]
}
response = requests.post(config["url"], json=payload, timeout=30)
response.raise_for_status()
result = response.json()
return result.get("response", "No response received")
except requests.exceptions.RequestException as e:
return f"Error connecting to MCP server: {e}"
except Exception as e:
return f"Error: {e}"
def main():
if len(sys.argv) != 2:
print("Usage: python chat.py <message>", file=sys.stderr)
sys.exit(1)
message = sys.argv[1]
try:
config = load_config()
print(f"🤖 Using {config['provider']} with model {config['model']}", file=sys.stderr)
# Dispatch the chat to the configured provider
if config["provider"] == "ollama":
response = chat_with_ollama(config, message)
elif config["provider"] == "openai":
response = chat_with_openai(config, message)
elif config["provider"] == "mcp":
response = chat_with_mcp(config, message)
else:
response = f"Unsupported provider: {config['provider']}"
# Save the conversation
save_conversation(message, response)
# Print the response
print(response)
except Exception as e:
print(f"❌ Error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -1,191 +0,0 @@
# chat_client.py
"""
Simple Chat Interface for AigptMCP Server
"""
import requests
import json
import os
from datetime import datetime
class AigptChatClient:
def __init__(self, server_url="http://localhost:5000"):
self.server_url = server_url
self.session_id = f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
self.conversation_history = []
def send_message(self, message: str) -> str:
"""メッセージを送信してレスポンスを取得"""
try:
# Send the message to the MCP server
response = requests.post(
f"{self.server_url}/chat",
json={"message": message},
headers={"Content-Type": "application/json"}
)
if response.status_code == 200:
data = response.json()
ai_response = data.get("response", "Sorry, no response received.")
# Record the conversation history
self.conversation_history.append({
"role": "user",
"content": message,
"timestamp": datetime.now().isoformat()
})
self.conversation_history.append({
"role": "assistant",
"content": ai_response,
"timestamp": datetime.now().isoformat()
})
# Update the relationship (simple example)
self.update_relationship(message, ai_response)
return ai_response
else:
return f"Error: {response.status_code} - {response.text}"
except requests.RequestException as e:
return f"Connection error: {e}"
def update_relationship(self, user_message: str, ai_response: str):
"""関係性を自動更新"""
try:
# Naive sentiment analysis (a real system would be more sophisticated)
positive_words = ["thank", "good", "great", "awesome", "love", "like", "helpful"]
negative_words = ["bad", "terrible", "hate", "wrong", "stupid", "useless"]
user_lower = user_message.lower()
interaction_type = "neutral"
weight = 1.0
if any(word in user_lower for word in positive_words):
interaction_type = "positive"
weight = 2.0
elif any(word in user_lower for word in negative_words):
interaction_type = "negative"
weight = 2.0
# Update the relationship
requests.post(
f"{self.server_url}/relationship/update",
json={
"target": "user_general",
"interaction_type": interaction_type,
"weight": weight,
"context": f"Chat: {user_message[:50]}..."
}
)
except:
pass  # keep going even if the relationship update fails
def search_memories(self, query: str) -> list:
"""記憶を検索"""
try:
response = requests.post(
f"{self.server_url}/memory/search",
json={"query": query, "limit": 5}
)
if response.status_code == 200:
return response.json().get("results", [])
except:
pass
return []
def get_relationship_status(self) -> dict:
"""関係性ステータスを取得"""
try:
response = requests.get(f"{self.server_url}/relationship/check?target=user_general")
if response.status_code == 200:
return response.json()
except:
pass
return {}
def save_conversation(self):
"""会話を保存"""
if not self.conversation_history:
return
conversation_data = {
"session_id": self.session_id,
"start_time": self.conversation_history[0]["timestamp"],
"end_time": self.conversation_history[-1]["timestamp"],
"messages": self.conversation_history,
"message_count": len(self.conversation_history)
}
filename = f"conversation_{self.session_id}.json"
with open(filename, 'w', encoding='utf-8') as f:
json.dump(conversation_data, f, ensure_ascii=False, indent=2)
print(f"💾 Conversation saved to {filename}")
def main():
"""メインのチャットループ"""
print("🤖 AigptMCP Chat Interface")
print("Type 'quit' to exit, 'save' to save conversation, 'status' for relationship status")
print("=" * 50)
client = AigptChatClient()
# Check that the server is up
try:
response = requests.get(client.server_url)
if response.status_code == 200:
print("✅ Connected to AigptMCP Server")
else:
print("❌ Failed to connect to server")
return
except:
print("❌ Server not running. Please start with: python mcp/server.py")
return
while True:
try:
user_input = input("\n👤 You: ").strip()
if not user_input:
continue
if user_input.lower() == 'quit':
client.save_conversation()
print("👋 Goodbye!")
break
elif user_input.lower() == 'save':
client.save_conversation()
continue
elif user_input.lower() == 'status':
status = client.get_relationship_status()
if status:
print(f"📊 Relationship Score: {status.get('score', 0):.1f}")
print(f"📤 Can Send Messages: {'Yes' if status.get('can_send_message') else 'No'}")
else:
print("❌ Failed to get relationship status")
continue
elif user_input.lower().startswith('search '):
query = user_input[7:] # Remove 'search '
memories = client.search_memories(query)
if memories:
print(f"🔍 Found {len(memories)} related memories:")
for memory in memories:
print(f" - {memory['title']}: {memory.get('ai_summary', memory.get('basic_summary', ''))[:100]}...")
else:
print("🔍 No related memories found")
continue
# Normal chat
print("🤖 AI: ", end="", flush=True)
response = client.send_message(user_input)
print(response)
except KeyboardInterrupt:
client.save_conversation()
print("\n👋 Goodbye!")
break
except Exception as e:
print(f"❌ Error: {e}")
if __name__ == "__main__":
main()

View File

@@ -1,391 +0,0 @@
[
{
"title": "day",
"create_time": 1747866125.548372,
"update_time": 1748160086.587877,
"mapping": {
"bbf104dc-cd84-478d-b227-edb3f037a02c": {
"id": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"message": null,
"parent": null,
"children": [
"6c2633df-bb0c-4dd2-889c-bb9858de3a04"
]
},
"6c2633df-bb0c-4dd2-889c-bb9858de3a04": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"message": {
"id": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "bbf104dc-cd84-478d-b227-edb3f037a02c",
"children": [
"92e5a0cb-1170-4929-9cea-9734e910a3e7"
]
},
"92e5a0cb-1170-4929-9cea-9734e910a3e7": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"message": {
"id": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": null,
"update_time": null,
"content": {
"content_type": "user_editable_context",
"user_profile": "",
"user_instructions": "The user provided the additional info about how they would like you to respond"
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"is_visually_hidden_from_conversation": true,
"user_context_message_data": {
"about_user_message": "Preferred name: syui\nRole: little girl\nOther Information: you world",
"about_model_message": "会話好きでフレンドリーな応対をします。"
},
"is_user_system_message": true
},
"recipient": "all",
"channel": null
},
"parent": "6c2633df-bb0c-4dd2-889c-bb9858de3a04",
"children": [
"6ff155b3-0676-4e14-993f-bf998ab0d5d1"
]
},
"6ff155b3-0676-4e14-993f-bf998ab0d5d1": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"message": {
"id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1747866131.0612159,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは"
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"request_id": "94377897baa03062-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "92e5a0cb-1170-4929-9cea-9734e910a3e7",
"children": [
"146e9fb6-9330-43ec-b08d-5cce42a76e00"
]
},
"146e9fb6-9330-43ec-b08d-5cce42a76e00": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"message": {
"id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"author": {
"role": "system",
"name": null,
"metadata": {}
},
"create_time": 1747866131.3795586,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 0.0,
"metadata": {
"rebase_system_message": true,
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute",
"is_visually_hidden_from_conversation": true
},
"recipient": "all",
"channel": null
},
"parent": "6ff155b3-0676-4e14-993f-bf998ab0d5d1",
"children": [
"2e345f8a-20f0-4875-8a03-4f62c7787a33"
]
},
"2e345f8a-20f0-4875-8a03-4f62c7787a33": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"message": {
"id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.380603,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "146e9fb6-9330-43ec-b08d-5cce42a76e00",
"children": [
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4"
]
},
"abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"message": {
"id": "abc92aa4-1e33-41f2-bd8c-8a1777b5a3c4",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1747866131.389098,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
"こんにちは〜!✨ \nアイだよっ今日も会えてうれしいなっ💛 "
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"request_id": "94377872e9abe139-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2e345f8a-20f0-4875-8a03-4f62c7787a33",
"children": [
"0be4b4a5-d52f-4bef-927e-5d6f93a9cb26"
]
}
},
"moderation_results": [],
"current_node": "",
"plugin_ids": null,
"conversation_id": "",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": true,
"is_starred": null,
"safe_urls": [],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": null,
"memory_scope": "global_enabled",
"id": ""
},
{
"title": "img",
"create_time": 1747448872.545226,
"update_time": 1748085075.161424,
"mapping": {
"2de0f3c9-52b1-49bf-b980-b3ef9be6551e": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"message": {
"id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"author": {
"role": "user",
"name": null,
"metadata": {}
},
"create_time": 1748085041.769279,
"update_time": null,
"content": {
"content_type": "multimodal_text",
"parts": [
{
"content_type": "image_asset_pointer",
"asset_pointer": "",
"size_bytes": 425613,
"width": 333,
"height": 444,
"fovea": null,
"metadata": {
"dalle": null,
"gizmo": null,
"generation": null,
"container_pixel_height": null,
"container_pixel_width": null,
"emu_omit_glimpse_image": null,
"emu_patches_override": null,
"sanitized": true,
"asset_pointer_link": null,
"watermarked_asset_pointer": null
}
},
""
]
},
"status": "finished_successfully",
"end_turn": null,
"weight": 1.0,
"metadata": {
"attachments": [
{
"name": "",
"width": 333,
"height": 444,
"size": 425613,
"id": "file-35eytNMMTW2k7vKUHBuNzW"
}
],
"request_id": "944c59177932fc9a-KIX",
"message_source": null,
"timestamp_": "absolute",
"message_type": null
},
"recipient": "all",
"channel": null
},
"parent": "7960fbff-bc4f-45e7-95e9-9d0bc79d9090",
"children": [
"98d84adc-156e-4c81-8cd8-9b0eb01c8369"
]
},
"98d84adc-156e-4c81-8cd8-9b0eb01c8369": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"message": {
"id": "98d84adc-156e-4c81-8cd8-9b0eb01c8369",
"author": {
"role": "assistant",
"name": null,
"metadata": {}
},
"create_time": 1748085043.312312,
"update_time": null,
"content": {
"content_type": "text",
"parts": [
""
]
},
"status": "finished_successfully",
"end_turn": true,
"weight": 1.0,
"metadata": {
"finish_details": {
"type": "stop",
"stop_tokens": [
200002
]
},
"is_complete": true,
"citations": [],
"content_references": [],
"message_type": null,
"model_slug": "gpt-4o",
"default_model_slug": "auto",
"parent_id": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"request_id": "944c5912c8fdd1c6-KIX",
"timestamp_": "absolute"
},
"recipient": "all",
"channel": null
},
"parent": "2de0f3c9-52b1-49bf-b980-b3ef9be6551e",
"children": [
"caa61793-9dbf-44a5-945b-5ca4cd5130d0"
]
}
},
"moderation_results": [],
"current_node": "06488d3f-a95f-4906-96d1-f7e9ba1e8662",
"plugin_ids": null,
"conversation_id": "6827f428-78e8-800d-b3bf-eb7ff4288e47",
"conversation_template_id": null,
"gizmo_id": null,
"gizmo_type": null,
"is_archived": false,
"is_starred": null,
"safe_urls": [
"https://exifinfo.org/"
],
"blocked_urls": [],
"default_model_slug": "auto",
"conversation_origin": null,
"voice": null,
"async_status": null,
"disabled_tool_ids": [],
"is_do_not_remember": false,
"memory_scope": "global_enabled",
"id": "6827f428-78e8-800d-b3bf-eb7ff4288e47"
}
]

View File

@@ -1,42 +0,0 @@
# mcp/config.py
import os
from pathlib import Path
# Directory settings
BASE_DIR = Path.home() / ".config" / "syui" / "ai" / "gpt"
MEMORY_DIR = BASE_DIR / "memory"
SUMMARY_DIR = MEMORY_DIR / "summary"
def init_directories():
"""必要なディレクトリを作成"""
BASE_DIR.mkdir(parents=True, exist_ok=True)
MEMORY_DIR.mkdir(parents=True, exist_ok=True)
SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
def load_config():
"""環境変数から設定を読み込み"""
provider = os.getenv("PROVIDER", "ollama")
model = os.getenv("MODEL", "syui/ai" if provider == "ollama" else "gpt-4o-mini")
api_key = os.getenv("OPENAI_API_KEY", "")
if provider == "ollama":
return {
"provider": "ollama",
"model": model,
"url": f"{os.getenv('OLLAMA_HOST', 'http://localhost:11434')}/api/generate"
}
elif provider == "openai":
return {
"provider": "openai",
"model": model,
"api_key": api_key,
"url": f"{os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1')}/chat/completions"
}
elif provider == "mcp":
return {
"provider": "mcp",
"model": model,
"url": os.getenv("MCP_URL", "http://localhost:5000/chat")
}
else:
raise ValueError(f"Unsupported provider: {provider}")

View File

@@ -1,212 +0,0 @@
# mcp/memory_client.py
"""
Memory client for importing and managing ChatGPT conversations
"""
import sys
import json
import requests
from pathlib import Path
from typing import Dict, Any, List
class MemoryClient:
"""記憶機能のクライアント"""
def __init__(self, server_url: str = "http://127.0.0.1:5000"):
self.server_url = server_url.rstrip('/')
def import_chatgpt_file(self, filepath: str) -> Dict[str, Any]:
"""ChatGPTのエクスポートファイルをインポート"""
try:
with open(filepath, 'r', encoding='utf-8') as f:
data = json.load(f)
# If the file contains an array (multiple conversations)
if isinstance(data, list):
results = []
for conversation in data:
result = self._import_single_conversation(conversation)
results.append(result)
return {
"success": True,
"imported_count": len([r for r in results if r.get("success")]),
"total_count": len(results),
"results": results
}
else:
# A single conversation
return self._import_single_conversation(data)
except FileNotFoundError:
return {"success": False, "error": f"File not found: {filepath}"}
except json.JSONDecodeError as e:
return {"success": False, "error": f"Invalid JSON: {e}"}
except Exception as e:
return {"success": False, "error": str(e)}
def _import_single_conversation(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]:
"""単一の会話をインポート"""
try:
response = requests.post(
f"{self.server_url}/memory/import/chatgpt",
json={"conversation_data": conversation_data},
timeout=30
)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
return {"success": False, "error": f"Server error: {e}"}
def search_memories(self, query: str, limit: int = 10) -> Dict[str, Any]:
"""記憶を検索"""
try:
response = requests.post(
f"{self.server_url}/memory/search",
json={"query": query, "limit": limit},
timeout=30
)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
return {"success": False, "error": f"Server error: {e}"}
def list_memories(self) -> Dict[str, Any]:
"""記憶一覧を取得"""
try:
response = requests.get(f"{self.server_url}/memory/list", timeout=30)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
return {"success": False, "error": f"Server error: {e}"}
def get_memory_detail(self, filepath: str) -> Dict[str, Any]:
"""記憶の詳細を取得"""
try:
response = requests.get(
f"{self.server_url}/memory/detail",
params={"filepath": filepath},
timeout=30
)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
return {"success": False, "error": f"Server error: {e}"}
def chat_with_memory(self, message: str, model: str = None) -> Dict[str, Any]:
"""記憶を活用してチャット"""
try:
payload = {"message": message}
if model:
payload["model"] = model
response = requests.post(
f"{self.server_url}/chat",
json=payload,
timeout=30
)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
return {"success": False, "error": f"Server error: {e}"}
def main():
"""コマンドライン インターフェース"""
if len(sys.argv) < 2:
print("Usage:")
print(" python memory_client.py import <chatgpt_export.json>")
print(" python memory_client.py search <query>")
print(" python memory_client.py list")
print(" python memory_client.py detail <filepath>")
print(" python memory_client.py chat <message>")
sys.exit(1)
client = MemoryClient()
command = sys.argv[1]
try:
if command == "import" and len(sys.argv) == 3:
filepath = sys.argv[2]
print(f"🔄 Importing ChatGPT conversations from {filepath}...")
result = client.import_chatgpt_file(filepath)
if result.get("success"):
if "imported_count" in result:
print(f"✅ Imported {result['imported_count']}/{result['total_count']} conversations")
else:
print("✅ Conversation imported successfully")
print(f"📁 Saved to: {result.get('filepath', 'Unknown')}")
else:
print(f"❌ Import failed: {result.get('error')}")
elif command == "search" and len(sys.argv) == 3:
query = sys.argv[2]
print(f"🔍 Searching for: {query}")
result = client.search_memories(query)
if result.get("success"):
memories = result.get("results", [])
print(f"📚 Found {len(memories)} memories:")
for memory in memories:
print(f"{memory.get('title', 'Untitled')}")
print(f" Summary: {memory.get('summary', 'No summary')}")
print(f" Messages: {memory.get('message_count', 0)}")
print()
else:
print(f"❌ Search failed: {result.get('error')}")
elif command == "list":
print("📋 Listing all memories...")
result = client.list_memories()
if result.get("success"):
memories = result.get("memories", [])
print(f"📚 Total memories: {len(memories)}")
for memory in memories:
print(f"{memory.get('title', 'Untitled')}")
print(f" Source: {memory.get('source', 'Unknown')}")
print(f" Messages: {memory.get('message_count', 0)}")
print(f" Imported: {memory.get('import_time', 'Unknown')}")
print()
else:
print(f"❌ List failed: {result.get('error')}")
elif command == "detail" and len(sys.argv) == 3:
filepath = sys.argv[2]
print(f"📄 Getting details for: {filepath}")
result = client.get_memory_detail(filepath)
if result.get("success"):
memory = result.get("memory", {})
print(f"Title: {memory.get('title', 'Untitled')}")
print(f"Source: {memory.get('source', 'Unknown')}")
print(f"Summary: {memory.get('summary', 'No summary')}")
print(f"Messages: {len(memory.get('messages', []))}")
print()
print("Recent messages:")
for msg in memory.get('messages', [])[:5]:
role = msg.get('role', 'unknown')
content = msg.get('content', '')[:100]
print(f" {role}: {content}...")
else:
print(f"❌ Detail failed: {result.get('error')}")
elif command == "chat" and len(sys.argv) == 3:
message = sys.argv[2]
print(f"💬 Chatting with memory: {message}")
result = client.chat_with_memory(message)
if result.get("success"):
print(f"🤖 Response: {result.get('response')}")
print(f"📚 Memories used: {result.get('memories_used', 0)}")
else:
print(f"❌ Chat failed: {result.get('error')}")
else:
print("❌ Invalid command or arguments")
sys.exit(1)
except Exception as e:
print(f"❌ Error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -1,8 +0,0 @@
# requirements.txt
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pydantic>=2.5.0
requests>=2.31.0
python-multipart>=0.0.6
aiohttp
asyncio

View File

@@ -1,703 +0,0 @@
# mcp/server.py
"""
Enhanced MCP Server with AI Memory Processing for aigpt CLI
"""
import json
import os
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
import asyncio
import aiohttp
# Data models
class ChatMessage(BaseModel):
message: str
model: Optional[str] = None
class MemoryQuery(BaseModel):
query: str
limit: Optional[int] = 10
class ConversationImport(BaseModel):
conversation_data: Dict[str, Any]
class MemorySummaryRequest(BaseModel):
filepath: str
ai_provider: Optional[str] = "openai"
class RelationshipUpdate(BaseModel):
target: str # target person or topic
interaction_type: str # "positive", "negative", "neutral"
weight: float = 1.0
context: Optional[str] = None
# Configuration
BASE_DIR = Path.home() / ".config" / "aigpt"
MEMORY_DIR = BASE_DIR / "memory"
CHATGPT_MEMORY_DIR = MEMORY_DIR / "chatgpt"
PROCESSED_MEMORY_DIR = MEMORY_DIR / "processed"
RELATIONSHIP_DIR = BASE_DIR / "relationships"
def init_directories():
"""必要なディレクトリを作成"""
BASE_DIR.mkdir(parents=True, exist_ok=True)
MEMORY_DIR.mkdir(parents=True, exist_ok=True)
CHATGPT_MEMORY_DIR.mkdir(parents=True, exist_ok=True)
PROCESSED_MEMORY_DIR.mkdir(parents=True, exist_ok=True)
RELATIONSHIP_DIR.mkdir(parents=True, exist_ok=True)
class AIMemoryProcessor:
"""AI記憶処理クラス"""
def __init__(self):
# AI API settings (taken from environment variables)
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
async def generate_ai_summary(self, messages: List[Dict[str, Any]], provider: str = "openai") -> Dict[str, Any]:
"""AIを使用して会話の高度な要約と分析を生成"""
# Concatenate the conversation content
conversation_text = ""
for msg in messages[-20:]: # use the 20 most recent messages
role = "User" if msg["role"] == "user" else "Assistant"
conversation_text += f"{role}: {msg['content'][:500]}\n"
# Build the prompt
analysis_prompt = f"""
Analyze the following conversation and extract this information in JSON format:
1. main_topics: main topics (max 5)
2. user_intent: the user's intent or goal
3. key_insights: key insights or takeaways (max 3)
4. relationship_indicators: elements that indicate the relationship
5. emotional_tone: the emotional tone
6. action_items: action items or next steps
7. summary: a summary within 100 characters
Conversation:
{conversation_text}
Respond with JSON only.
"""
try:
if provider == "openai" and self.openai_api_key:
return await self._call_openai_api(analysis_prompt)
elif provider == "anthropic" and self.anthropic_api_key:
return await self._call_anthropic_api(analysis_prompt)
else:
# Fallback: basic analysis
return self._generate_basic_analysis(messages)
except Exception as e:
print(f"AI analysis failed: {e}")
return self._generate_basic_analysis(messages)
async def _call_openai_api(self, prompt: str) -> Dict[str, Any]:
"""OpenAI APIを呼び出し"""
async with aiohttp.ClientSession() as session:
headers = {
"Authorization": f"Bearer {self.openai_api_key}",
"Content-Type": "application/json"
}
data = {
"model": "gpt-4",
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.3,
"max_tokens": 1000
}
async with session.post("https://api.openai.com/v1/chat/completions",
headers=headers, json=data) as response:
result = await response.json()
content = result["choices"][0]["message"]["content"]
return json.loads(content)
async def _call_anthropic_api(self, prompt: str) -> Dict[str, Any]:
"""Anthropic APIを呼び出し"""
async with aiohttp.ClientSession() as session:
headers = {
"x-api-key": self.anthropic_api_key,
"Content-Type": "application/json",
"anthropic-version": "2023-06-01"
}
data = {
"model": "claude-3-sonnet-20240229",
"max_tokens": 1000,
"messages": [{"role": "user", "content": prompt}]
}
async with session.post("https://api.anthropic.com/v1/messages",
headers=headers, json=data) as response:
result = await response.json()
content = result["content"][0]["text"]
return json.loads(content)
def _generate_basic_analysis(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
"""基本的な分析AI APIが利用できない場合のフォールバック"""
user_messages = [msg for msg in messages if msg["role"] == "user"]
assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
# Keyword extraction (simplified)
all_text = " ".join([msg["content"] for msg in messages])
words = all_text.lower().split()
word_freq = {}
for word in words:
if len(word) > 3:
word_freq[word] = word_freq.get(word, 0) + 1
top_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]
return {
"main_topics": [word[0] for word in top_words],
"user_intent": "情報収集・問題解決",
"key_insights": ["基本的な会話分析"],
"relationship_indicators": {
"interaction_count": len(messages),
"user_engagement": len(user_messages),
"assistant_helpfulness": len(assistant_messages)
},
"emotional_tone": "neutral",
"action_items": [],
"summary": f"{len(user_messages)}回のやり取りによる会話"
}
class RelationshipTracker:
"""関係性追跡クラス"""
def __init__(self):
init_directories()
self.relationship_file = RELATIONSHIP_DIR / "relationships.json"
self.relationships = self._load_relationships()
def _load_relationships(self) -> Dict[str, Any]:
"""関係性データを読み込み"""
if self.relationship_file.exists():
with open(self.relationship_file, 'r', encoding='utf-8') as f:
return json.load(f)
return {"targets": {}, "last_updated": datetime.now().isoformat()}
def _save_relationships(self):
"""関係性データを保存"""
self.relationships["last_updated"] = datetime.now().isoformat()
with open(self.relationship_file, 'w', encoding='utf-8') as f:
json.dump(self.relationships, f, ensure_ascii=False, indent=2)
def update_relationship(self, target: str, interaction_type: str, weight: float = 1.0, context: str = None):
"""関係性を更新"""
if target not in self.relationships["targets"]:
self.relationships["targets"][target] = {
"score": 0.0,
"interactions": [],
"created_at": datetime.now().isoformat(),
"last_interaction": None
}
# Compute the score change
score_change = 0.0
if interaction_type == "positive":
score_change = weight * 1.0
elif interaction_type == "negative":
score_change = weight * -1.0
# Apply time decay
self._apply_time_decay(target)
# Update the score
current_score = self.relationships["targets"][target]["score"]
new_score = current_score + score_change
# Clamp the score to the range [-100, 100]
new_score = max(-100, min(100, new_score))
self.relationships["targets"][target]["score"] = new_score
self.relationships["targets"][target]["last_interaction"] = datetime.now().isoformat()
# Append to the interaction history
interaction_record = {
"type": interaction_type,
"weight": weight,
"score_change": score_change,
"new_score": new_score,
"timestamp": datetime.now().isoformat(),
"context": context
}
self.relationships["targets"][target]["interactions"].append(interaction_record)
# Keep only the 100 most recent history entries
if len(self.relationships["targets"][target]["interactions"]) > 100:
self.relationships["targets"][target]["interactions"] = \
self.relationships["targets"][target]["interactions"][-100:]
self._save_relationships()
return new_score
def _apply_time_decay(self, target: str):
"""時間減衰を適用"""
target_data = self.relationships["targets"][target]
last_interaction = target_data.get("last_interaction")
if last_interaction:
last_time = datetime.fromisoformat(last_interaction)
now = datetime.now()
days_passed = (now - last_time).days
# 5% decay every 7 days
if days_passed > 0:
decay_factor = 0.95 ** (days_passed / 7)
target_data["score"] *= decay_factor
def get_relationship_score(self, target: str) -> float:
"""関係性スコアを取得"""
if target in self.relationships["targets"]:
self._apply_time_decay(target)
return self.relationships["targets"][target]["score"]
return 0.0
def should_send_message(self, target: str, threshold: float = 50.0) -> bool:
"""メッセージ送信の可否を判定"""
score = self.get_relationship_score(target)
return score >= threshold
def get_all_relationships(self) -> Dict[str, Any]:
"""すべての関係性を取得"""
# 全ターゲットに時間減衰を適用
for target in self.relationships["targets"]:
self._apply_time_decay(target)
return self.relationships
class MemoryManager:
"""記憶管理クラスAI処理機能付き"""
def __init__(self):
init_directories()
self.ai_processor = AIMemoryProcessor()
self.relationship_tracker = RelationshipTracker()
def parse_chatgpt_conversation(self, conversation_data: Dict[str, Any]) -> List[Dict[str, Any]]:
"""ChatGPTの会話データを解析してメッセージを抽出"""
messages = []
mapping = conversation_data.get("mapping", {})
# Order messages chronologically
message_nodes = []
for node_id, node in mapping.items():
message = node.get("message")
if not message:
continue
content = message.get("content", {})
parts = content.get("parts", [])
if parts and isinstance(parts[0], str) and parts[0].strip():
message_nodes.append({
"id": node_id,
"create_time": message.get("create_time", 0),
"author_role": message["author"]["role"],
"content": parts[0],
"parent": node.get("parent")
})
# Sort by creation time
message_nodes.sort(key=lambda x: x["create_time"] or 0)
for msg in message_nodes:
if msg["author_role"] in ["user", "assistant"]:
messages.append({
"role": msg["author_role"],
"content": msg["content"],
"timestamp": msg["create_time"],
"message_id": msg["id"]
})
return messages
async def save_chatgpt_memory(self, conversation_data: Dict[str, Any], process_with_ai: bool = True) -> str:
"""ChatGPTの会話を記憶として保存AI処理オプション付き"""
title = conversation_data.get("title", "untitled")
create_time = conversation_data.get("create_time", datetime.now().timestamp())
# Parse the messages
messages = self.parse_chatgpt_conversation(conversation_data)
if not messages:
raise ValueError("No valid messages found in conversation")
# Run the AI analysis
ai_analysis = None
if process_with_ai:
try:
ai_analysis = await self.ai_processor.generate_ai_summary(messages)
except Exception as e:
print(f"AI analysis failed: {e}")
# Generate a basic summary
basic_summary = self.generate_basic_summary(messages)
# Build the data to store
memory_data = {
"title": title,
"source": "chatgpt",
"import_time": datetime.now().isoformat(),
"original_create_time": create_time,
"messages": messages,
"basic_summary": basic_summary,
"ai_analysis": ai_analysis,
"message_count": len(messages),
"hash": self._generate_content_hash(messages)
}
# Update relationship data
if ai_analysis and "relationship_indicators" in ai_analysis:
interaction_count = ai_analysis["relationship_indicators"].get("interaction_count", 0)
if interaction_count > 10: # long conversations count positively toward the relationship
self.relationship_tracker.update_relationship(
target="user_general",
interaction_type="positive",
weight=min(interaction_count / 10, 5.0),
context=f"Long conversation: {title}"
)
# Build the filename
safe_title = "".join(c for c in title if c.isalnum() or c in (' ', '-', '_')).rstrip()
timestamp = datetime.fromtimestamp(create_time).strftime("%Y%m%d_%H%M%S")
filename = f"{timestamp}_{safe_title[:50]}.json"
filepath = CHATGPT_MEMORY_DIR / filename
with open(filepath, 'w', encoding='utf-8') as f:
json.dump(memory_data, f, ensure_ascii=False, indent=2)
# Also save to the processed-memory directory
if ai_analysis:
processed_filepath = PROCESSED_MEMORY_DIR / filename
with open(processed_filepath, 'w', encoding='utf-8') as f:
json.dump(memory_data, f, ensure_ascii=False, indent=2)
return str(filepath)
def generate_basic_summary(self, messages: List[Dict[str, Any]]) -> str:
"""基本要約を生成"""
if not messages:
return "Empty conversation"
user_messages = [msg for msg in messages if msg["role"] == "user"]
assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
summary = f"Conversation with {len(user_messages)} user messages and {len(assistant_messages)} assistant responses. "
if user_messages:
first_user_msg = user_messages[0]["content"][:100]
summary += f"Started with: {first_user_msg}..."
return summary
def _generate_content_hash(self, messages: List[Dict[str, Any]]) -> str:
"""メッセージ内容のハッシュを生成"""
content = "".join([msg["content"] for msg in messages])
return hashlib.sha256(content.encode()).hexdigest()[:16]
def search_memories(self, query: str, limit: int = 10, use_ai_analysis: bool = True) -> List[Dict[str, Any]]:
"""記憶を検索AI分析結果も含む"""
results = []
# Search processed memories first
search_dirs = [PROCESSED_MEMORY_DIR, CHATGPT_MEMORY_DIR] if use_ai_analysis else [CHATGPT_MEMORY_DIR]
for search_dir in search_dirs:
for filepath in search_dir.glob("*.json"):
try:
with open(filepath, 'r', encoding='utf-8') as f:
memory_data = json.load(f)
# Build the searchable text
search_text = f"{memory_data.get('title', '')} {memory_data.get('basic_summary', '')}"
# Include AI analysis results in the search text
if memory_data.get('ai_analysis'):
ai_analysis = memory_data['ai_analysis']
search_text += f" {' '.join(ai_analysis.get('main_topics', []))}"
search_text += f" {ai_analysis.get('summary', '')}"
search_text += f" {' '.join(ai_analysis.get('key_insights', []))}"
# Include message contents in the search text
for msg in memory_data.get('messages', []):
search_text += f" {msg.get('content', '')}"
if query.lower() in search_text.lower():
result = {
"filepath": str(filepath),
"title": memory_data.get("title"),
"basic_summary": memory_data.get("basic_summary"),
"source": memory_data.get("source"),
"import_time": memory_data.get("import_time"),
"message_count": len(memory_data.get("messages", [])),
"has_ai_analysis": bool(memory_data.get("ai_analysis"))
}
if memory_data.get('ai_analysis'):
result["ai_summary"] = memory_data['ai_analysis'].get('summary', '')
result["main_topics"] = memory_data['ai_analysis'].get('main_topics', [])
results.append(result)
if len(results) >= limit:
break
except Exception as e:
print(f"Error reading memory file {filepath}: {e}")
continue
if len(results) >= limit:
break
return results
def get_memory_detail(self, filepath: str) -> Dict[str, Any]:
"""記憶の詳細を取得"""
try:
with open(filepath, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
raise ValueError(f"Error reading memory file: {e}")
def list_all_memories(self) -> List[Dict[str, Any]]:
"""すべての記憶をリスト"""
memories = []
for filepath in CHATGPT_MEMORY_DIR.glob("*.json"):
try:
with open(filepath, 'r', encoding='utf-8') as f:
memory_data = json.load(f)
memory_info = {
"filepath": str(filepath),
"title": memory_data.get("title"),
"basic_summary": memory_data.get("basic_summary"),
"source": memory_data.get("source"),
"import_time": memory_data.get("import_time"),
"message_count": len(memory_data.get("messages", [])),
"has_ai_analysis": bool(memory_data.get("ai_analysis"))
}
if memory_data.get('ai_analysis'):
memory_info["ai_summary"] = memory_data['ai_analysis'].get('summary', '')
memory_info["main_topics"] = memory_data['ai_analysis'].get('main_topics', [])
memories.append(memory_info)
except Exception as e:
print(f"Error reading memory file {filepath}: {e}")
continue
# Sort by import time (newest first)
memories.sort(key=lambda x: x.get("import_time", ""), reverse=True)
return memories
# FastAPI application
app = FastAPI(title="AigptMCP Server with AI Memory", version="2.0.0")
memory_manager = MemoryManager()
@app.post("/memory/import/chatgpt")
async def import_chatgpt_conversation(data: ConversationImport, process_with_ai: bool = True):
"""ChatGPTの会話をインポートAI処理オプション付き"""
try:
filepath = await memory_manager.save_chatgpt_memory(data.conversation_data, process_with_ai)
return {
"success": True,
"message": "Conversation imported successfully",
"filepath": filepath,
"ai_processed": process_with_ai
}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@app.post("/memory/process-ai")
async def process_memory_with_ai(data: MemorySummaryRequest):
"""既存の記憶をAIで再処理"""
try:
# 既存記憶を読み込み
memory_data = memory_manager.get_memory_detail(data.filepath)
# AI分析を実行
ai_analysis = await memory_manager.ai_processor.generate_ai_summary(
memory_data["messages"],
data.ai_provider
)
# Update the record
memory_data["ai_analysis"] = ai_analysis
memory_data["ai_processed_at"] = datetime.now().isoformat()
# Rewrite the file
with open(data.filepath, 'w', encoding='utf-8') as f:
json.dump(memory_data, f, ensure_ascii=False, indent=2)
# Copy to the processed-memory directory as well
processed_filepath = PROCESSED_MEMORY_DIR / Path(data.filepath).name
with open(processed_filepath, 'w', encoding='utf-8') as f:
json.dump(memory_data, f, ensure_ascii=False, indent=2)
return {
"success": True,
"message": "Memory processed with AI successfully",
"ai_analysis": ai_analysis
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/memory/search")
async def search_memories(query: MemoryQuery):
"""記憶を検索"""
try:
results = memory_manager.search_memories(query.query, query.limit)
return {
"success": True,
"results": results,
"count": len(results)
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/memory/list")
async def list_memories():
"""すべての記憶をリスト"""
try:
memories = memory_manager.list_all_memories()
return {
"success": True,
"memories": memories,
"count": len(memories)
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/memory/detail")
async def get_memory_detail(filepath: str):
"""記憶の詳細を取得"""
try:
detail = memory_manager.get_memory_detail(filepath)
return {
"success": True,
"memory": detail
}
except Exception as e:
raise HTTPException(status_code=404, detail=str(e))
@app.post("/relationship/update")
async def update_relationship(data: RelationshipUpdate):
"""関係性を更新"""
try:
new_score = memory_manager.relationship_tracker.update_relationship(
data.target, data.interaction_type, data.weight, data.context
)
return {
"success": True,
"new_score": new_score,
"can_send_message": memory_manager.relationship_tracker.should_send_message(data.target)
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/relationship/list")
async def list_relationships():
"""すべての関係性をリスト"""
try:
relationships = memory_manager.relationship_tracker.get_all_relationships()
return {
"success": True,
"relationships": relationships
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/relationship/check")
async def check_send_permission(target: str, threshold: float = 50.0):
"""メッセージ送信可否をチェック"""
try:
score = memory_manager.relationship_tracker.get_relationship_score(target)
can_send = memory_manager.relationship_tracker.should_send_message(target, threshold)
return {
"success": True,
"target": target,
"score": score,
"can_send_message": can_send,
"threshold": threshold
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/chat")
async def chat_endpoint(data: ChatMessage):
"""チャット機能(記憶と関係性を活用)"""
try:
# 関連する記憶を検索
memories = memory_manager.search_memories(data.message, limit=3)
# メモリのコンテキストを構築
memory_context = ""
if memories:
memory_context = "\n# Related memories:\n"
for memory in memories:
memory_context += f"- {memory['title']}: {memory.get('ai_summary', memory.get('basic_summary', ''))}\n"
if memory.get('main_topics'):
memory_context += f" Topics: {', '.join(memory['main_topics'])}\n"
# Fetch relationship information
relationships = memory_manager.relationship_tracker.get_all_relationships()
# Actual chat processing (currently a placeholder that echoes the enriched message)
enhanced_message = data.message
if memory_context:
enhanced_message = f"{data.message}\n\n{memory_context}"
return {
"success": True,
"response": f"Enhanced response with memory context: {enhanced_message}",
"memories_used": len(memories),
"relationship_info": {
"active_relationships": len(relationships.get("targets", {})),
"can_initiate_conversations": sum(1 for target, data in relationships.get("targets", {}).items()
if memory_manager.relationship_tracker.should_send_message(target))
}
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/")
async def root():
"""ヘルスチェック"""
return {
"service": "AigptMCP Server with AI Memory",
"version": "2.0.0",
"status": "running",
"memory_dir": str(MEMORY_DIR),
"features": [
"AI-powered memory analysis",
"Relationship tracking",
"Advanced memory search",
"Conversation import",
"Auto-summary generation"
],
"endpoints": [
"/memory/import/chatgpt",
"/memory/process-ai",
"/memory/search",
"/memory/list",
"/memory/detail",
"/relationship/update",
"/relationship/list",
"/relationship/check",
"/chat"
]
}
if __name__ == "__main__":
print("🚀 AigptMCP Server with AI Memory starting...")
print(f"📁 Memory directory: {MEMORY_DIR}")
print(f"🧠 AI Memory processing: {'✅ Enabled' if os.getenv('OPENAI_API_KEY') or os.getenv('ANTHROPIC_API_KEY') else '❌ Disabled (no API keys)'}")
uvicorn.run(app, host="127.0.0.1", port=5000)
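For reference, a minimal client sketch that exercises the endpoints above (not part of this commit). It assumes the server is running locally on port 5000, as configured in uvicorn.run; the conversations.json path and the query strings are hypothetical.

# client_sketch.py - hypothetical usage example for the API above
import json
import requests

BASE = "http://127.0.0.1:5000"

# Import one ChatGPT conversation; a full export file may be a JSON array,
# in which case each element should be posted separately.
with open("conversations.json", encoding="utf-8") as f:  # hypothetical path
    conversation = json.load(f)
r = requests.post(f"{BASE}/memory/import/chatgpt", json={"conversation_data": conversation})
print(r.json())

# Search the imported memories
r = requests.post(f"{BASE}/memory/search", json={"query": "rust", "limit": 5})
for hit in r.json()["results"]:
    print(hit["title"], hit.get("ai_summary", ""))

# Chat with memory context
r = requests.post(f"{BASE}/chat", json={"message": "What did we discuss about MCP?"})
print(r.json()["response"])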

View File

@ -1,64 +0,0 @@
// src/cli.rs
use clap::{Parser, Subcommand};
#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "AI GPT CLI with MCP Server and Memory")]
pub struct Args {
#[command(subcommand)]
pub command: Commands,
}
#[derive(Subcommand)]
pub enum Commands {
/// MCP Server management
Server {
#[command(subcommand)]
command: ServerCommands,
},
/// Chat with AI
Chat {
/// Message to send
message: String,
/// Use memory context
#[arg(long)]
with_memory: bool,
},
/// Memory management
Memory {
#[command(subcommand)]
command: MemoryCommands,
},
}
#[derive(Subcommand)]
pub enum ServerCommands {
/// Setup Python MCP server environment
Setup,
/// Run the MCP server
Run,
}
#[derive(Subcommand)]
pub enum MemoryCommands {
/// Import ChatGPT conversation export file
Import {
/// Path to ChatGPT export JSON file
file: String,
},
/// Search memories
Search {
/// Search query
query: String,
/// Maximum number of results
#[arg(short, long, default_value = "10")]
limit: usize,
},
/// List all memories
List,
/// Show memory details
Detail {
/// Path to memory file
filepath: String,
},
}

View File

@ -1,59 +0,0 @@
// src/config.rs
use std::fs;
use std::path::{Path, PathBuf};
use shellexpand;
pub struct ConfigPaths {
pub base_dir: PathBuf,
}
impl ConfigPaths {
pub fn new() -> Self {
let app_name = env!("CARGO_PKG_NAME");
let mut base_dir = shellexpand::tilde("~").to_string();
base_dir.push_str(&format!("/.config/{}/", app_name));
let base_path = Path::new(&base_dir);
if !base_path.exists() {
let _ = fs::create_dir_all(base_path);
}
ConfigPaths {
base_dir: base_path.to_path_buf(),
}
}
#[allow(dead_code)]
pub fn data_file(&self, file_name: &str) -> PathBuf {
let file_path = match file_name {
"db" => self.base_dir.join("user.db"),
"toml" => self.base_dir.join("user.toml"),
"json" => self.base_dir.join("user.json"),
_ => self.base_dir.join(format!(".{}", file_name)),
};
file_path
}
pub fn mcp_dir(&self) -> PathBuf {
self.base_dir.join("mcp")
}
pub fn venv_path(&self) -> PathBuf {
self.mcp_dir().join(".venv")
}
pub fn python_executable(&self) -> PathBuf {
if cfg!(windows) {
self.venv_path().join("Scripts").join("python.exe")
} else {
self.venv_path().join("bin").join("python")
}
}
pub fn pip_executable(&self) -> PathBuf {
if cfg!(windows) {
self.venv_path().join("Scripts").join("pip.exe")
} else {
self.venv_path().join("bin").join("pip")
}
}
}
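As a quick reference, the on-disk layout ConfigPaths resolves, sketched in Python and assuming the crate name (CARGO_PKG_NAME) is "aigpt":

# Sketch of the layout ConfigPaths produces (app name assumed "aigpt").
import os
from pathlib import Path

base_dir = Path.home() / ".config" / "aigpt"   # created on first run
mcp_dir = base_dir / "mcp"                     # MCP server files copied by setup
venv = mcp_dir / ".venv"                       # per-app virtualenv
python_exe = venv / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
pip_exe = venv / ("Scripts/pip.exe" if os.name == "nt" else "bin/pip")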

View File

@ -1,58 +0,0 @@
// main.rs
mod cli;
mod config;
mod mcp;
use cli::{Args, Commands, ServerCommands, MemoryCommands};
use clap::Parser;
#[tokio::main]
async fn main() {
let args = Args::parse();
match args.command {
Commands::Server { command } => {
match command {
ServerCommands::Setup => {
mcp::server::setup();
}
ServerCommands::Run => {
mcp::server::run().await;
}
}
}
Commands::Chat { message, with_memory } => {
if with_memory {
if let Err(e) = mcp::memory::handle_chat_with_memory(&message).await {
eprintln!("❌ 記憶チャットエラー: {}", e);
}
} else {
mcp::server::chat(&message).await;
}
}
Commands::Memory { command } => {
match command {
MemoryCommands::Import { file } => {
if let Err(e) = mcp::memory::handle_import(&file).await {
eprintln!("❌ インポートエラー: {}", e);
}
}
MemoryCommands::Search { query, limit } => {
if let Err(e) = mcp::memory::handle_search(&query, limit).await {
eprintln!("❌ 検索エラー: {}", e);
}
}
MemoryCommands::List => {
if let Err(e) = mcp::memory::handle_list().await {
eprintln!("❌ 一覧取得エラー: {}", e);
}
}
MemoryCommands::Detail { filepath } => {
if let Err(e) = mcp::memory::handle_detail(&filepath).await {
eprintln!("❌ 詳細取得エラー: {}", e);
}
}
}
}
}
}

View File

@ -1,393 +0,0 @@
// src/mcp/memory.rs
use reqwest;
use serde::{Deserialize, Serialize};
use serde_json::{self, Value};
use std::fs;
use std::path::Path;
#[derive(Debug, Serialize, Deserialize)]
pub struct MemorySearchRequest {
pub query: String,
pub limit: usize,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ChatRequest {
pub message: String,
pub model: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ConversationImportRequest {
pub conversation_data: Value,
}
#[derive(Debug, Deserialize)]
pub struct ApiResponse {
pub success: bool,
pub error: Option<String>,
#[allow(dead_code)]
pub message: Option<String>,
pub filepath: Option<String>,
pub results: Option<Vec<MemoryResult>>,
pub memories: Option<Vec<MemoryResult>>,
#[allow(dead_code)]
pub count: Option<usize>,
pub memory: Option<Value>,
pub response: Option<String>,
pub memories_used: Option<usize>,
pub imported_count: Option<usize>,
pub total_count: Option<usize>,
}
#[derive(Debug, Deserialize)]
pub struct MemoryResult {
#[allow(dead_code)]
pub filepath: String,
pub title: Option<String>,
pub summary: Option<String>,
pub source: Option<String>,
pub import_time: Option<String>,
pub message_count: Option<usize>,
}
pub struct MemoryClient {
base_url: String,
client: reqwest::Client,
}
impl MemoryClient {
pub fn new(base_url: Option<String>) -> Self {
let url = base_url.unwrap_or_else(|| "http://127.0.0.1:5000".to_string());
Self {
base_url: url,
client: reqwest::Client::new(),
}
}
pub async fn import_chatgpt_file(&self, filepath: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
// Read the file
let content = fs::read_to_string(filepath)?;
let json_data: Value = serde_json::from_str(&content)?;
// Check whether the JSON is an array of conversations
match json_data.as_array() {
Some(conversations) => {
// Import multiple conversations
let mut imported_count = 0;
let total_count = conversations.len();
for conversation in conversations {
match self.import_single_conversation(conversation.clone()).await {
Ok(response) => {
if response.success {
imported_count += 1;
}
}
Err(e) => {
eprintln!("❌ インポートエラー: {}", e);
}
}
}
Ok(ApiResponse {
success: true,
imported_count: Some(imported_count),
total_count: Some(total_count),
error: None,
message: Some(format!("Imported {} of {} conversations", imported_count, total_count)),
filepath: None,
results: None,
memories: None,
count: None,
memory: None,
response: None,
memories_used: None,
})
}
None => {
// Import a single conversation
self.import_single_conversation(json_data).await
}
}
}
async fn import_single_conversation(&self, conversation_data: Value) -> Result<ApiResponse, Box<dyn std::error::Error>> {
let request = ConversationImportRequest { conversation_data };
let response = self.client
.post(&format!("{}/memory/import/chatgpt", self.base_url))
.json(&request)
.send()
.await?;
let result: ApiResponse = response.json().await?;
Ok(result)
}
pub async fn search_memories(&self, query: &str, limit: usize) -> Result<ApiResponse, Box<dyn std::error::Error>> {
let request = MemorySearchRequest {
query: query.to_string(),
limit,
};
let response = self.client
.post(&format!("{}/memory/search", self.base_url))
.json(&request)
.send()
.await?;
let result: ApiResponse = response.json().await?;
Ok(result)
}
pub async fn list_memories(&self) -> Result<ApiResponse, Box<dyn std::error::Error>> {
let response = self.client
.get(&format!("{}/memory/list", self.base_url))
.send()
.await?;
let result: ApiResponse = response.json().await?;
Ok(result)
}
pub async fn get_memory_detail(&self, filepath: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
let response = self.client
.get(&format!("{}/memory/detail", self.base_url))
.query(&[("filepath", filepath)])
.send()
.await?;
let result: ApiResponse = response.json().await?;
Ok(result)
}
pub async fn chat_with_memory(&self, message: &str) -> Result<ApiResponse, Box<dyn std::error::Error>> {
let request = ChatRequest {
message: message.to_string(),
model: None,
};
let response = self.client
.post(&format!("{}/chat", self.base_url))
.json(&request)
.send()
.await?;
let result: ApiResponse = response.json().await?;
Ok(result)
}
pub async fn is_server_running(&self) -> bool {
match self.client.get(&self.base_url).send().await {
Ok(response) => response.status().is_success(),
Err(_) => false,
}
}
}
pub async fn handle_import(filepath: &str) -> Result<(), Box<dyn std::error::Error>> {
if !Path::new(filepath).exists() {
eprintln!("❌ ファイルが見つかりません: {}", filepath);
return Ok(());
}
let client = MemoryClient::new(None);
// Check that the server is running
if !client.is_server_running().await {
eprintln!("❌ MCP Server is not running. Run 'aigpt server run' first.");
return Ok(());
}
println!("🔄 ChatGPT会話をインポートしています: {}", filepath);
match client.import_chatgpt_file(filepath).await {
Ok(response) => {
if response.success {
if let (Some(imported), Some(total)) = (response.imported_count, response.total_count) {
println!("{}個中{}個の会話をインポートしました", total, imported);
} else {
println!("✅ 会話をインポートしました");
if let Some(path) = response.filepath {
println!("📁 保存先: {}", path);
}
}
} else {
eprintln!("❌ インポートに失敗: {:?}", response.error);
}
}
Err(e) => {
eprintln!("❌ インポートエラー: {}", e);
}
}
Ok(())
}
pub async fn handle_search(query: &str, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
let client = MemoryClient::new(None);
if !client.is_server_running().await {
eprintln!("❌ MCP Server is not running. Run 'aigpt server run' first.");
return Ok(());
}
println!("🔍 Searching memories: {}", query);
match client.search_memories(query, limit).await {
Ok(response) => {
if response.success {
if let Some(results) = response.results {
println!("📚 {}個の記憶が見つかりました:", results.len());
for memory in results {
println!("{}", memory.title.unwrap_or_else(|| "タイトルなし".to_string()));
if let Some(summary) = memory.summary {
println!(" 概要: {}", summary);
}
if let Some(count) = memory.message_count {
println!(" メッセージ数: {}", count);
}
println!();
}
} else {
println!("📚 記憶が見つかりませんでした");
}
} else {
eprintln!("❌ 検索に失敗: {:?}", response.error);
}
}
Err(e) => {
eprintln!("❌ 検索エラー: {}", e);
}
}
Ok(())
}
pub async fn handle_list() -> Result<(), Box<dyn std::error::Error>> {
let client = MemoryClient::new(None);
if !client.is_server_running().await {
eprintln!("❌ MCP Server is not running. Run 'aigpt server run' first.");
return Ok(());
}
println!("📋 Fetching memory list...");
match client.list_memories().await {
Ok(response) => {
if response.success {
if let Some(memories) = response.memories {
println!("📚 総記憶数: {}", memories.len());
for memory in memories {
println!("{}", memory.title.unwrap_or_else(|| "タイトルなし".to_string()));
if let Some(source) = memory.source {
println!(" ソース: {}", source);
}
if let Some(count) = memory.message_count {
println!(" メッセージ数: {}", count);
}
if let Some(import_time) = memory.import_time {
println!(" インポート時刻: {}", import_time);
}
println!();
}
} else {
println!("📚 記憶がありません");
}
} else {
eprintln!("❌ 一覧取得に失敗: {:?}", response.error);
}
}
Err(e) => {
eprintln!("❌ 一覧取得エラー: {}", e);
}
}
Ok(())
}
pub async fn handle_detail(filepath: &str) -> Result<(), Box<dyn std::error::Error>> {
let client = MemoryClient::new(None);
if !client.is_server_running().await {
eprintln!("❌ MCP Server is not running. Run 'aigpt server run' first.");
return Ok(());
}
println!("📄 Fetching memory details: {}", filepath);
match client.get_memory_detail(filepath).await {
Ok(response) => {
if response.success {
if let Some(memory) = response.memory {
if let Some(title) = memory.get("title").and_then(|v| v.as_str()) {
println!("タイトル: {}", title);
}
if let Some(source) = memory.get("source").and_then(|v| v.as_str()) {
println!("ソース: {}", source);
}
if let Some(summary) = memory.get("summary").and_then(|v| v.as_str()) {
println!("概要: {}", summary);
}
if let Some(messages) = memory.get("messages").and_then(|v| v.as_array()) {
println!("メッセージ数: {}", messages.len());
println!("\n最近のメッセージ:");
for msg in messages.iter().take(5) {
if let (Some(role), Some(content)) = (
msg.get("role").and_then(|v| v.as_str()),
msg.get("content").and_then(|v| v.as_str())
) {
// Truncate on a character boundary; a byte slice (&content[..100]) can panic on multi-byte UTF-8
let content_preview = if content.chars().count() > 100 {
format!("{}...", content.chars().take(100).collect::<String>())
} else {
content.to_string()
};
println!(" {}: {}", role, content_preview);
}
}
}
}
} else {
eprintln!("❌ 詳細取得に失敗: {:?}", response.error);
}
}
Err(e) => {
eprintln!("❌ 詳細取得エラー: {}", e);
}
}
Ok(())
}
pub async fn handle_chat_with_memory(message: &str) -> Result<(), Box<dyn std::error::Error>> {
let client = MemoryClient::new(None);
if !client.is_server_running().await {
eprintln!("❌ MCP Server is not running. Run 'aigpt server run' first.");
return Ok(());
}
println!("💬 Chatting with memory context...");
match client.chat_with_memory(message).await {
Ok(response) => {
if response.success {
if let Some(reply) = response.response {
println!("🤖 {}", reply);
}
if let Some(memories_used) = response.memories_used {
println!("📚 使用した記憶数: {}", memories_used);
}
} else {
eprintln!("❌ チャットに失敗: {:?}", response.error);
}
}
Err(e) => {
eprintln!("❌ チャットエラー: {}", e);
}
}
Ok(())
}
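For orientation, a sketch of the minimal ChatGPT export shape this importer posts to the server. Field names follow the server-side parser (title, create_time, mapping -> message -> author.role / content.parts); the values are illustrative.

# Illustrative shape of one exported conversation; an export file is either
# one such object or a JSON array of them (import_chatgpt_file handles both).
conversation = {
    "title": "Example conversation",
    "create_time": 1717300000.0,
    "mapping": {
        "node-1": {
            "parent": None,
            "message": {
                "create_time": 1717300000.0,
                "author": {"role": "user"},
                "content": {"parts": ["Hello!"]},
            },
        },
    },
}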

View File

@ -1,3 +0,0 @@
// src/mcp/mod.rs
pub mod server;
pub mod memory;

View File

@ -1,147 +0,0 @@
// src/mcp/server.rs
use crate::config::ConfigPaths;
//use std::fs;
use std::process::Command as OtherCommand;
use std::env;
use fs_extra::dir::{copy, CopyOptions};
pub fn setup() {
println!("🔧 MCP Server環境をセットアップしています...");
let config = ConfigPaths::new();
let mcp_dir = config.mcp_dir();
// Copy files from the project's mcp/ directory
let current_dir = env::current_dir().expect("failed to get the current directory");
let project_mcp_dir = current_dir.join("mcp");
if !project_mcp_dir.exists() {
eprintln!("❌ Project mcp/ directory not found: {}", project_mcp_dir.display());
return;
}
if mcp_dir.exists() {
fs_extra::dir::remove(&mcp_dir).expect("既存のmcp_dirの削除に失敗しました");
}
let mut options = CopyOptions::new();
options.overwrite = true; // overwrite existing files
options.copy_inside = true; // copy the directory contents, not the directory itself
copy(&project_mcp_dir, &mcp_dir, &options).expect("copy failed");
// Create the virtual environment
let venv_path = config.venv_path();
if !venv_path.exists() {
println!("🐍 仮想環境を作成しています...");
let output = OtherCommand::new("python3")
.args(&["-m", "venv", ".venv"])
.current_dir(&mcp_dir)
.output()
.expect("venvの作成に失敗しました");
if !output.status.success() {
eprintln!("❌ venv作成エラー: {}", String::from_utf8_lossy(&output.stderr));
return;
}
println!("✅ 仮想環境を作成しました");
} else {
println!("✅ 仮想環境は既に存在します");
}
// 依存関係のインストール
println!("📦 依存関係をインストールしています...");
let pip_path = config.pip_executable();
let output = OtherCommand::new(&pip_path)
.args(&["install", "-r", "requirements.txt"])
.current_dir(&mcp_dir)
.output()
.expect("pipコマンドの実行に失敗しました");
if !output.status.success() {
eprintln!("❌ pip installエラー: {}", String::from_utf8_lossy(&output.stderr));
return;
}
println!("✅ MCP Server環境のセットアップが完了しました!");
println!("📍 セットアップ場所: {}", mcp_dir.display());
}
pub async fn run() {
println!("🚀 MCP Serverを起動しています...");
let config = ConfigPaths::new();
let mcp_dir = config.mcp_dir();
let python_path = config.python_executable();
let server_py_path = mcp_dir.join("server.py");
// Verify setup
if !server_py_path.exists() {
eprintln!("❌ server.py not found. Run 'aigpt server setup' first.");
return;
}
if !python_path.exists() {
eprintln!("❌ Python executable not found. Run 'aigpt server setup' first.");
return;
}
// Start the server
println!("🔗 Server starting... (Ctrl+C to stop)");
let mut child = OtherCommand::new(&python_path)
.arg("server.py")
.current_dir(&mcp_dir)
.spawn()
.expect("MCP Serverの起動に失敗しました");
// サーバーの終了を待機
match child.wait() {
Ok(status) => {
if status.success() {
println!("✅ MCP Serverが正常に終了しました");
} else {
println!("❌ MCP Serverが異常終了しました: {}", status);
}
}
Err(e) => {
eprintln!("❌ MCP Serverの実行中にエラーが発生しました: {}", e);
}
}
}
pub async fn chat(message: &str) {
println!("💬 チャットを開始しています...");
let config = ConfigPaths::new();
let mcp_dir = config.mcp_dir();
let python_path = config.python_executable();
let chat_py_path = mcp_dir.join("chat.py");
// Verify setup
if !chat_py_path.exists() {
eprintln!("❌ chat.py not found. Run 'aigpt server setup' first.");
return;
}
if !python_path.exists() {
eprintln!("❌ Python executable not found. Run 'aigpt server setup' first.");
return;
}
// Run the chat script
let output = OtherCommand::new(&python_path)
.args(&["chat.py", message])
.current_dir(&mcp_dir)
.output()
.expect("chat.pyの実行に失敗しました");
if output.status.success() {
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
if !stderr.is_empty() {
print!("{}", stderr);
}
print!("{}", stdout);
} else {
eprintln!("❌ チャット実行エラー: {}", String::from_utf8_lossy(&output.stderr));
}
}

View File

@ -0,0 +1,17 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0

View File

@ -0,0 +1,20 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,2 @@
[console_scripts]
aigpt = aigpt.cli:app

View File

@ -0,0 +1,12 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0

View File

@ -0,0 +1 @@
aigpt

View File

@ -7,6 +7,12 @@ from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from datetime import datetime, timedelta
import subprocess
import shlex
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from .persona import Persona
from .transmission import TransmissionController
@ -369,6 +375,176 @@ def schedule(
console.print("Valid actions: add, list, enable, disable, remove, run") console.print("Valid actions: add, list, enable, disable, remove, run")
@app.command()
def shell(
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
model: Optional[str] = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
provider: Optional[str] = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)")
):
"""Interactive shell mode (ai.shell)"""
persona = get_persona(data_dir)
# Create AI provider
ai_provider = None
if provider and model:
try:
ai_provider = create_ai_provider(provider, model)
console.print(f"[dim]Using {provider} with model {model}[/dim]\n")
except Exception as e:
console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
console.print("[yellow]Falling back to simple responses[/yellow]\n")
# Welcome message
console.print(Panel(
"[cyan]Welcome to ai.shell[/cyan]\n\n"
"Interactive AI-powered shell with command execution\n\n"
"Commands:\n"
" help - Show available commands\n"
" exit/quit - Exit shell\n"
" !<command> - Execute shell command\n"
" chat <message> - Chat with AI\n"
" status - Show AI status\n"
" clear - Clear screen\n\n"
"Type any message to interact with AI",
title="ai.shell",
border_style="green"
))
# Command completer
commands = ['help', 'exit', 'quit', 'chat', 'status', 'clear', 'fortune', 'relationships']
completer = WordCompleter(commands)
# History file (data_dir may be None; fall back to the home directory, an assumed default)
history_file = (data_dir or Path.home()) / "shell_history.txt"
history = FileHistory(str(history_file))
# Main shell loop
current_user = "shell_user" # Default user for shell sessions
while True:
try:
# Get input with completion
user_input = prompt(
"ai.shell> ",
completer=completer,
history=history,
auto_suggest=AutoSuggestFromHistory()
).strip()
if not user_input:
continue
# Exit commands
if user_input.lower() in ['exit', 'quit']:
console.print("[cyan]Goodbye![/cyan]")
break
# Help command
elif user_input.lower() == 'help':
console.print(Panel(
"[cyan]ai.shell Commands:[/cyan]\n\n"
" help - Show this help message\n"
" exit/quit - Exit the shell\n"
" !<command> - Execute a shell command\n"
" chat <message> - Explicitly chat with AI\n"
" status - Show AI status\n"
" fortune - Check AI fortune\n"
" relationships - List all relationships\n"
" clear - Clear the screen\n\n"
"You can also type any message to chat with AI\n"
"Use Tab for command completion",
title="Help",
border_style="yellow"
))
# Clear command
elif user_input.lower() == 'clear':
console.clear()
# Shell command execution
elif user_input.startswith('!'):
cmd = user_input[1:].strip()
if cmd:
try:
# Execute command
result = subprocess.run(
shlex.split(cmd),
capture_output=True,
text=True,
shell=False
)
if result.stdout:
console.print(result.stdout.rstrip())
if result.stderr:
console.print(f"[red]{result.stderr.rstrip()}[/red]")
if result.returncode != 0:
console.print(f"[red]Command exited with code {result.returncode}[/red]")
except FileNotFoundError:
console.print(f"[red]Command not found: {cmd.split()[0]}[/red]")
except Exception as e:
console.print(f"[red]Error executing command: {e}[/red]")
# Status command
elif user_input.lower() == 'status':
state = persona.get_current_state()
console.print(f"\nMood: {state.current_mood}")
console.print(f"Fortune: {state.fortune.fortune_value}/10")
rel = persona.relationships.get_or_create_relationship(current_user)
console.print(f"\nRelationship Status: {rel.status.value}")
console.print(f"Score: {rel.score:.2f} / {rel.threshold}")
# Fortune command
elif user_input.lower() == 'fortune':
fortune = persona.fortune_system.get_today_fortune()
fortune_bar = "🌟" * fortune.fortune_value + "" * (10 - fortune.fortune_value)
console.print(f"\n{fortune_bar}")
console.print(f"Today's Fortune: {fortune.fortune_value}/10")
# Relationships command
elif user_input.lower() == 'relationships':
if persona.relationships.relationships:
console.print("\n[cyan]Relationships:[/cyan]")
for user_id, rel in persona.relationships.relationships.items():
console.print(f" {user_id[:16]}... - {rel.status.value} ({rel.score:.2f})")
else:
console.print("[yellow]No relationships yet[/yellow]")
# Chat command or direct message
else:
# Remove 'chat' prefix if present
if user_input.lower().startswith('chat '):
message = user_input[5:].strip()
else:
message = user_input
if message:
# Process interaction with AI
response, relationship_delta = persona.process_interaction(
current_user, message, ai_provider
)
# Display response
console.print(f"\n[cyan]AI:[/cyan] {response}")
# Show relationship change if significant
if abs(relationship_delta) >= 0.1:
if relationship_delta > 0:
console.print(f"[green](+{relationship_delta:.2f} relationship)[/green]")
else:
console.print(f"[red]({relationship_delta:.2f} relationship)[/red]")
except KeyboardInterrupt:
console.print("\n[yellow]Use 'exit' or 'quit' to leave the shell[/yellow]")
except EOFError:
console.print("\n[cyan]Goodbye![/cyan]")
break
except Exception as e:
console.print(f"[red]Error: {e}[/red]")
@app.command()
def config(
action: str = typer.Argument(..., help="Action: get, set, delete, list"),