This commit is contained in:
2025-06-03 03:02:15 +09:00
parent 1755dc2bec
commit 3487535e08
7 changed files with 766 additions and 33 deletions

View File

@@ -42,7 +42,8 @@
"Bash(echo:*)",
"Bash(aigpt shell:*)",
"Bash(aigpt maintenance)",
"Bash(aigpt status syui)"
"Bash(aigpt status syui)",
"Bash(cp:*)"
],
"deny": []
}

View File

@@ -546,6 +546,97 @@ aigpt maintenance # AI要約を自動実行
aigpt chat syui "記憶システムについて" --provider ollama --model qwen3:latest
```
## 🎉 **TODAY: MCP Integration and Improved Server Display Complete** (2025/06/03)
### ✅ **Today's Key Improvements**
#### 🚀 **Major Overhaul of the Server Startup Display**
The previously minimal output has been replaced with a professional, information-rich display:
```bash
aigpt server
```
**Before:**
```
Starting ai.gpt MCP Server
Host: localhost:8001
Endpoints: 27 MCP tools
```
**After:**
```
🚀 ai.gpt MCP Server
Server Configuration:
🌐 Address: http://localhost:8001
📋 API Docs: http://localhost:8001/docs
💾 Data Directory: /Users/syui/.config/syui/ai/gpt/data
AI Provider Configuration:
🤖 Provider: ollama ✅ http://192.168.11.95:11434
🧩 Model: qwen3
MCP Tools Available (27 total):
🧠 Memory System: 5 tools
🤝 Relationships: 4 tools
⚙️ System State: 3 tools
💻 Shell Integration: 5 tools
🔒 Remote Execution: 4 tools
Integration Status:
✅ MCP Client Ready
🔗 Config: /Users/syui/.config/syui/ai/gpt/config.json
```
#### 🔧 **OpenAI Function Calling + MCP Integration Proven**
MCP function calling now works end to end with OpenAI GPT-4o-mini:
```bash
aigpt conv test_user --provider openai --model gpt-4o-mini
```
**Flow:**
1. **Natural-language input**: 「覚えていることはある?」 ("Do you remember anything?")
2. **Automatic tool selection**: OpenAI calls `get_memories` on its own
3. **MCP communication**: an HTTP request to `http://localhost:8001/get_memories` (sketched below)
4. **Memory retrieval**: real data from past conversations is fetched
5. **Contextual reply**: the answer draws on concrete remembered content
**Verified in practice:**
```sh
🔧 [OpenAI] 1 tools called:
- get_memories({"limit":5})
🌐 [MCP] Executing get_memories...
[MCP] Result: [{'id': '5ce8f7d0-c078-43f1...
```
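Each tool call above resolves to a plain HTTP request against the MCP server. A minimal sketch of the `get_memories` round trip (endpoint path and port as in config.json; a synchronous stand-in for the async `MCPClient` added in this commit):
```python
# Sketch only: what executing the get_memories tool boils down to.
# Assumes the MCP server from this commit is running on localhost:8001.
import httpx

def fetch_memories(limit: int = 5, base_url: str = "http://localhost:8001"):
    with httpx.Client(timeout=10.0) as client:
        response = client.get(f"{base_url}/get_memories", params={"limit": limit})
        return response.json() if response.status_code == 200 else None
```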
#### 📊 **Completed Integration Architecture**
```
OpenAI GPT-4o-mini
↓ (Function Calling)
MCP Client (aigpt conv)
↓ (HTTP API)
MCP Server (aigpt server:8001)
↓ (Direct Access)
Memory/Relationship Systems
JSON/SQLite Data
```
### 🎯 **Technical Achievements**
- **Distributed AI system**: multiple AI apps share memories over inter-process MCP communication (see the sketch below)
- **OpenAI integration**: GPT-4o-mini function calling is fully wired into the memory system
- **Professional UI**: status display in the style of enterprise-grade developer tools
- **Unified configuration**: settings load automatically from config.json
- **Error handling**: graceful shutdown, configuration checks, and connection-status reporting
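To make the distributed claim concrete: any second process can share the same memory store purely over HTTP. A hypothetical extra client, mirroring the `process_interaction` payload used by `MCPClient` in this commit:
```python
# Hypothetical second app sharing memories with the running MCP server.
# Endpoint and JSON payload shape follow MCPClient.process_interaction below.
import asyncio
import httpx

async def main():
    async with httpx.AsyncClient(timeout=10.0) as client:
        r = await client.post(
            "http://localhost:8001/process_interaction",
            json={"user_id": "syui", "message": "hello from another process"},
        )
        print(r.json())

asyncio.run(main())
```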
### 📈 **Improved User Experience**
- **Developer experience**: server status is visible at a glance
- **Debugging efficiency**: detailed logs and state reporting
- **Configuration management**: config file path and provider status made explicit
- **AI integration**: seamless interplay of OpenAI + MCP + the memory system
**ai.gpt's core architecture is complete, and it now operates as a practical AI memory system** 🚀
## 🔥 **NEW: Claude Code-style Continuous Development Features** (completed 2025/06/03)
### 🚀 **Project Management System Fully Implemented**

config.json Normal file (+36 lines)
View File

@@ -0,0 +1,36 @@
{
"providers": {
"openai": {
"api_key": "",
"default_model": "gpt-4o-mini"
},
"ollama": {
"host": "http://127.0.0.1:11434",
"default_model": "qwen3"
}
},
"atproto": {
"handle": null,
"password": null,
"host": "https://bsky.social"
},
"default_provider": "openai",
"mcp": {
"servers": {
"ai_gpt": {
"base_url": "http://localhost:8001",
"name": "ai.gpt MCP Server",
"timeout": "10.0",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"get_relationship": "/get_relationship",
"process_interaction": "/process_interaction"
}
}
},
"enabled": "true",
"auto_detect": "true"
}
}
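The CLI consumes this file through `Config`'s dotted-key lookup. A minimal sketch of how the values above are read (matching the `MCPClient` code later in this commit, including its defensive handling of a string `timeout`):
```python
# Sketch assuming the Config class from this commit (dotted-key get with default)
config = Config()
base_url = config.get("mcp.servers.ai_gpt.base_url", "http://localhost:8001")
timeout = config.get("mcp.servers.ai_gpt.timeout", 5.0)
if isinstance(timeout, str):  # tolerate "10.0" as well as 10.0
    timeout = float(timeout)
```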

View File

@ -1,6 +1,7 @@
"""AI Provider integration for response generation"""
import os
import json
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
@@ -128,9 +129,9 @@ Recent memories:
class OpenAIProvider:
"""OpenAI API provider"""
"""OpenAI API provider with MCP function calling support"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None):
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, mcp_client=None):
self.model = model
# Try to get API key from config first
config = Config()
@@ -139,6 +140,90 @@ class OpenAIProvider:
raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
self.mcp_client = mcp_client # For MCP function calling
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
"""Generate OpenAI tools from MCP endpoints"""
if not self.mcp_client or not self.mcp_client.available:
return []
tools = [
{
"type": "function",
"function": {
"name": "get_memories",
"description": "過去の会話記憶を取得します。「覚えている」「前回」「以前」などの質問で必ず使用してください",
"parameters": {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
}
}
}
},
{
"type": "function",
"function": {
"name": "search_memories",
"description": "特定のトピックについて話した記憶を検索します。「プログラミングについて」「○○について話した」などの質問で使用してください",
"parameters": {
"type": "object",
"properties": {
"keywords": {
"type": "array",
"items": {"type": "string"},
"description": "検索キーワードの配列"
}
},
"required": ["keywords"]
}
}
},
{
"type": "function",
"function": {
"name": "get_contextual_memories",
"description": "クエリに関連する文脈的記憶を取得します",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索クエリ"
},
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "get_relationship",
"description": "特定ユーザーとの関係性情報を取得します",
"parameters": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"description": "ユーザーID"
}
},
"required": ["user_id"]
}
}
}
]
return tools
async def generate_response(
self,
@@ -184,6 +269,127 @@ Recent memories:
self.logger.error(f"OpenAI generation failed: {e}")
return self._fallback_response(persona_state)
async def chat_with_mcp(self, prompt: str, max_tokens: int = 2000, user_id: str = "user") -> str:
"""Chat interface with MCP function calling support"""
if not self.mcp_client or not self.mcp_client.available:
return self.chat(prompt, max_tokens)
try:
# Prepare tools
tools = self._get_mcp_tools()
# Initial request with tools
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "あなたは記憶システムと関係性データにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。"},
{"role": "user", "content": prompt}
],
tools=tools,
tool_choice="auto",
max_tokens=max_tokens,
temperature=0.7
)
message = response.choices[0].message
# Handle tool calls
if message.tool_calls:
print(f"🔧 [OpenAI] {len(message.tool_calls)} tools called:")
for tc in message.tool_calls:
print(f" - {tc.function.name}({tc.function.arguments})")
messages = [
{"role": "system", "content": "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"},
{"role": "user", "content": prompt},
{
"role": "assistant",
"content": message.content,
"tool_calls": [tc.model_dump() for tc in message.tool_calls]
}
]
# Execute each tool call
for tool_call in message.tool_calls:
print(f"🌐 [MCP] Executing {tool_call.function.name}...")
tool_result = await self._execute_mcp_tool(tool_call, user_id)
print(f"✅ [MCP] Result: {str(tool_result)[:100]}...")
messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
"name": tool_call.function.name,
"content": json.dumps(tool_result, ensure_ascii=False)
})
# Get final response with tool outputs
final_response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=0.7
)
return final_response.choices[0].message.content
else:
return message.content
except Exception as e:
self.logger.error(f"OpenAI MCP chat failed: {e}")
return f"申し訳ありません。エラーが発生しました: {e}"
async def _execute_mcp_tool(self, tool_call, context_user_id: str = "user") -> Dict[str, Any]:
"""Execute MCP tool call"""
try:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
if function_name == "get_memories":
limit = arguments.get("limit", 5)
return await self.mcp_client.get_memories(limit) or {"error": "記憶の取得に失敗しました"}
elif function_name == "search_memories":
keywords = arguments.get("keywords", [])
return await self.mcp_client.search_memories(keywords) or {"error": "記憶の検索に失敗しました"}
elif function_name == "get_contextual_memories":
query = arguments.get("query", "")
limit = arguments.get("limit", 5)
return await self.mcp_client.get_contextual_memories(query, limit) or {"error": "文脈記憶の取得に失敗しました"}
elif function_name == "get_relationship":
# Fall back to the context when no user_id argument was supplied
user_id = arguments.get("user_id", context_user_id)
if not user_id or user_id == "user":
user_id = context_user_id
# Debug logging
print(f"🔍 [DEBUG] get_relationship called with user_id: '{user_id}' (context: '{context_user_id}')")
result = await self.mcp_client.get_relationship(user_id)
print(f"🔍 [DEBUG] MCP result: {result}")
return result or {"error": "関係性の取得に失敗しました"}
else:
return {"error": f"未知のツール: {function_name}"}
except Exception as e:
return {"error": f"ツール実行エラー: {str(e)}"}
def chat(self, prompt: str, max_tokens: int = 2000) -> str:
"""Simple chat interface without MCP tools"""
try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "user", "content": prompt}
],
max_tokens=max_tokens,
temperature=0.7
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI chat failed: {e}")
return "I'm having trouble connecting to the AI model."
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
@@ -196,7 +402,7 @@ Recent memories:
return mood_responses.get(persona_state.current_mood, "I see.")
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, **kwargs) -> AIProvider:
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
# Get model from config if not provided
@@ -228,6 +434,6 @@ def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, **
model = config.get('providers.openai.default_model', 'gpt-4o-mini')
except:
model = 'gpt-4o-mini' # Fallback to default
return OpenAIProvider(model=model, **kwargs)
return OpenAIProvider(model=model, mcp_client=mcp_client, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")

View File

@@ -2,13 +2,15 @@
import typer
from pathlib import Path
from typing import Optional
from typing import Optional, Dict, Any
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from datetime import datetime, timedelta
import subprocess
import shlex
import httpx
import asyncio
from prompt_toolkit import prompt as ptk_prompt
from prompt_toolkit.completion import WordCompleter, Completer, Completion
from prompt_toolkit.history import FileHistory
@@ -30,6 +32,202 @@ config = Config()
DEFAULT_DATA_DIR = config.data_dir
class MCPClient:
"""Client for communicating with MCP server using config settings"""
def __init__(self, config: Optional[Config] = None):
self.config = config or Config()
self.enabled = self.config.get("mcp.enabled", True)
self.auto_detect = self.config.get("mcp.auto_detect", True)
self.servers = self.config.get("mcp.servers", {})
self.available = False
if self.enabled:
self._check_availability()
def _check_availability(self):
"""Check if any MCP server is available"""
self.available = False
if not self.enabled:
print(f"🚨 [MCP Client] MCP disabled in config")
return
print(f"🔍 [MCP Client] Checking availability...")
print(f"🔍 [MCP Client] Available servers: {list(self.servers.keys())}")
# Check ai.gpt server first (primary)
ai_gpt_config = self.servers.get("ai_gpt", {})
if ai_gpt_config:
base_url = ai_gpt_config.get("base_url", "http://localhost:8001")
timeout = ai_gpt_config.get("timeout", 5.0)
# Convert timeout to float if it's a string
if isinstance(timeout, str):
timeout = float(timeout)
print(f"🔍 [MCP Client] Testing ai_gpt server: {base_url} (timeout: {timeout})")
try:
import httpx
with httpx.Client(timeout=timeout) as client:
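# Probe the FastAPI docs page as a lightweight health check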
response = client.get(f"{base_url}/docs")
print(f"🔍 [MCP Client] ai_gpt response: {response.status_code}")
if response.status_code == 200:
self.available = True
self.active_server = "ai_gpt"
print(f"✅ [MCP Client] ai_gpt server connected successfully")
return
except Exception as e:
print(f"🚨 [MCP Client] ai_gpt connection failed: {e}")
else:
print(f"🚨 [MCP Client] No ai_gpt config found")
# If auto_detect is enabled, try to find any available server
if self.auto_detect:
print(f"🔍 [MCP Client] Auto-detect enabled, trying other servers...")
for server_name, server_config in self.servers.items():
base_url = server_config.get("base_url", "")
timeout = server_config.get("timeout", 5.0)
# Convert timeout to float if it's a string
if isinstance(timeout, str):
timeout = float(timeout)
print(f"🔍 [MCP Client] Testing {server_name}: {base_url} (timeout: {timeout})")
try:
import httpx
with httpx.Client(timeout=timeout) as client:
response = client.get(f"{base_url}/docs")
print(f"🔍 [MCP Client] {server_name} response: {response.status_code}")
if response.status_code == 200:
self.available = True
self.active_server = server_name
print(f"✅ [MCP Client] {server_name} server connected successfully")
return
except Exception as e:
print(f"🚨 [MCP Client] {server_name} connection failed: {e}")
print(f"🚨 [MCP Client] No MCP servers available")
def _get_url(self, endpoint_name: str) -> Optional[str]:
"""Get full URL for an endpoint"""
if not self.available or not hasattr(self, 'active_server'):
print(f"🚨 [MCP Client] Not available or no active server")
return None
server_config = self.servers.get(self.active_server, {})
base_url = server_config.get("base_url", "")
endpoints = server_config.get("endpoints", {})
endpoint_path = endpoints.get(endpoint_name, "")
print(f"🔍 [MCP Client] Server: {self.active_server}")
print(f"🔍 [MCP Client] Base URL: {base_url}")
print(f"🔍 [MCP Client] Endpoints: {list(endpoints.keys())}")
print(f"🔍 [MCP Client] Looking for: {endpoint_name}")
print(f"🔍 [MCP Client] Found path: {endpoint_path}")
if base_url and endpoint_path:
return f"{base_url}{endpoint_path}"
return None
def _get_timeout(self) -> float:
"""Get timeout for the active server"""
if not hasattr(self, 'active_server'):
return 5.0
server_config = self.servers.get(self.active_server, {})
timeout = server_config.get("timeout", 5.0)
# Convert timeout to float if it's a string
if isinstance(timeout, str):
timeout = float(timeout)
return timeout
async def get_memories(self, limit: int = 5) -> Optional[Dict[str, Any]]:
"""Get memories via MCP"""
url = self._get_url("get_memories")
if not url:
return None
try:
async with httpx.AsyncClient(timeout=self._get_timeout()) as client:
response = await client.get(f"{url}?limit={limit}")
return response.json() if response.status_code == 200 else None
except Exception:
return None
async def search_memories(self, keywords: list) -> Optional[Dict[str, Any]]:
"""Search memories via MCP"""
url = self._get_url("search_memories")
if not url:
return None
try:
async with httpx.AsyncClient(timeout=self._get_timeout()) as client:
response = await client.post(url, json={"keywords": keywords})
return response.json() if response.status_code == 200 else None
except Exception:
return None
async def get_contextual_memories(self, query: str, limit: int = 5) -> Optional[Dict[str, Any]]:
"""Get contextual memories via MCP"""
url = self._get_url("get_contextual_memories")
if not url:
return None
try:
async with httpx.AsyncClient(timeout=self._get_timeout()) as client:
response = await client.get(f"{url}?query={query}&limit={limit}")
return response.json() if response.status_code == 200 else None
except Exception:
return None
async def process_interaction(self, user_id: str, message: str) -> Optional[Dict[str, Any]]:
"""Process interaction via MCP"""
url = self._get_url("process_interaction")
if not url:
return None
try:
async with httpx.AsyncClient(timeout=self._get_timeout()) as client:
response = await client.post(url, json={"user_id": user_id, "message": message})
return response.json() if response.status_code == 200 else None
except Exception:
return None
async def get_relationship(self, user_id: str) -> Optional[Dict[str, Any]]:
"""Get relationship via MCP"""
url = self._get_url("get_relationship")
print(f"🔍 [MCP Client] get_relationship URL: {url}")
if not url:
print(f"🚨 [MCP Client] No URL found for get_relationship")
return None
try:
async with httpx.AsyncClient(timeout=self._get_timeout()) as client:
response = await client.get(f"{url}?user_id={user_id}")
print(f"🔍 [MCP Client] Response status: {response.status_code}")
if response.status_code == 200:
result = response.json()
print(f"🔍 [MCP Client] Response data: {result}")
return result
else:
print(f"🚨 [MCP Client] HTTP error: {response.status_code}")
return None
except Exception as e:
print(f"🚨 [MCP Client] Exception: {e}")
return None
def get_server_info(self) -> Dict[str, Any]:
"""Get information about the active MCP server"""
if not self.available or not hasattr(self, 'active_server'):
return {"available": False}
server_config = self.servers.get(self.active_server, {})
return {
"available": True,
"server_name": self.active_server,
"display_name": server_config.get("name", self.active_server),
"base_url": server_config.get("base_url", ""),
"timeout": server_config.get("timeout", 5.0),
"endpoints": len(server_config.get("endpoints", {}))
}
def get_persona(data_dir: Optional[Path] = None) -> Persona:
"""Get or create persona instance"""
if data_dir is None:
@@ -226,10 +424,10 @@ def relationships(
@app.command()
def server(
host: str = typer.Option("localhost", "--host", "-h", help="Server host"),
port: int = typer.Option(8000, "--port", "-p", help="Server port"),
port: int = typer.Option(8001, "--port", "-p", help="Server port"),
data_dir: Optional[Path] = typer.Option(None, "--data-dir", "-d", help="Data directory"),
model: str = typer.Option("qwen2.5", "--model", "-m", help="AI model to use"),
provider: str = typer.Option("ollama", "--provider", help="AI provider (ollama/openai)")
model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"),
provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)")
):
"""Run MCP server for AI integration"""
import uvicorn
@@ -239,26 +437,94 @@ def server(
data_dir.mkdir(parents=True, exist_ok=True)
# Get configuration
config_instance = Config()
# Get defaults from config if not provided
if not provider:
provider = config_instance.get("default_provider", "ollama")
if not model:
if provider == "ollama":
model = config_instance.get("providers.ollama.default_model", "qwen3:latest")
elif provider == "openai":
model = config_instance.get("providers.openai.default_model", "gpt-4o-mini")
else:
model = "qwen3:latest"
# Create MCP server
mcp_server = AIGptMcpServer(data_dir)
app_instance = mcp_server.app
# Get endpoint categories and count
total_routes = len(mcp_server.app.routes)
mcp_tools = total_routes - 2 # Exclude docs and openapi
# Categorize endpoints
memory_endpoints = ["get_memories", "search_memories", "get_contextual_memories", "create_summary", "create_core_memory"]
relationship_endpoints = ["get_relationship", "get_all_relationships", "process_interaction", "check_transmission_eligibility"]
system_endpoints = ["get_persona_state", "get_fortune", "run_maintenance"]
shell_endpoints = ["execute_command", "analyze_file", "write_file", "list_files", "read_project_file"]
remote_endpoints = ["remote_shell", "ai_bot_status", "isolated_python", "isolated_analysis"]
# Build endpoint summary
endpoint_summary = f"""🧠 Memory System: {len(memory_endpoints)} tools
🤝 Relationships: {len(relationship_endpoints)} tools
⚙️ System State: {len(system_endpoints)} tools
💻 Shell Integration: {len(shell_endpoints)} tools
🔒 Remote Execution: {len(remote_endpoints)} tools"""
# Check MCP client connectivity
mcp_client = MCPClient(config_instance)
mcp_status = "✅ MCP Client Ready" if mcp_client.available else "⚠️ MCP Client Disabled"
# Provider configuration check
provider_status = "✅ Ready"
if provider == "openai":
api_key = config_instance.get_api_key("openai")
if not api_key:
provider_status = "⚠️ No API Key"
elif provider == "ollama":
ollama_host = config_instance.get("providers.ollama.host", "http://localhost:11434")
provider_status = f"{ollama_host}"
console.print(Panel(
f"[cyan]Starting ai.gpt MCP Server[/cyan]\n\n"
f"Host: {host}:{port}\n"
f"Provider: {provider}\n"
f"Model: {model}\n"
f"Data: {data_dir}",
title="MCP Server",
border_style="green"
f"[bold cyan]🚀 ai.gpt MCP Server[/bold cyan]\n\n"
f"[green]Server Configuration:[/green]\n"
f"🌐 Address: http://{host}:{port}\n"
f"📋 API Docs: http://{host}:{port}/docs\n"
f"💾 Data Directory: {data_dir}\n\n"
f"[green]AI Provider Configuration:[/green]\n"
f"🤖 Provider: {provider} {provider_status}\n"
f"🧩 Model: {model}\n\n"
f"[green]MCP Tools Available ({mcp_tools} total):[/green]\n"
f"{endpoint_summary}\n\n"
f"[green]Integration Status:[/green]\n"
f"{mcp_status}\n"
f"🔗 Config: {config_instance.config_file}\n\n"
f"[dim]Press Ctrl+C to stop server[/dim]",
title="🔧 MCP Server Startup",
border_style="green",
expand=True
))
# Store provider info in app state for later use
app_instance.state.ai_provider = provider
app_instance.state.ai_model = model
app_instance.state.config = config_instance
# Run server
uvicorn.run(app_instance, host=host, port=port)
# Run server with better logging
try:
uvicorn.run(
app_instance,
host=host,
port=port,
log_level="info",
access_log=False # Reduce noise
)
except KeyboardInterrupt:
console.print("\n[yellow]🛑 MCP Server stopped[/yellow]")
except Exception as e:
console.print(f"\n[red]❌ Server error: {e}[/red]")
@app.command()
@@ -869,7 +1135,8 @@ def config(
console.print("[red]Error: key required for get action[/red]")
return
val = config.get(key)
config_instance = Config()
val = config_instance.get(key)
if val is None:
console.print(f"[yellow]Key '{key}' not found[/yellow]")
else:
@@ -880,13 +1147,14 @@ def config(
console.print("[red]Error: key and value required for set action[/red]")
return
config_instance = Config()
# Special handling for sensitive keys
if "password" in key or "api_key" in key:
console.print(f"[cyan]Setting {key}[/cyan] = [dim]***hidden***[/dim]")
else:
console.print(f"[cyan]Setting {key}[/cyan] = [green]{value}[/green]")
config.set(key, value)
config_instance.set(key, value)
console.print("[green]✓ Configuration saved[/green]")
elif action == "delete":
@@ -894,7 +1162,8 @@ def config(
console.print("[red]Error: key required for delete action[/red]")
return
if config.delete(key):
config_instance = Config()
if config_instance.delete(key):
console.print(f"[green]✓ Deleted {key}[/green]")
else:
console.print(f"[yellow]Key '{key}' not found[/yellow]")
@@ -986,7 +1255,9 @@ def conversation(
model: Optional[str] = typer.Option(None, "--model", "-m", help="AI model to use"),
provider: Optional[str] = typer.Option(None, "--provider", help="AI provider (ollama/openai)")
):
"""Simple continuous conversation mode"""
"""Simple continuous conversation mode with MCP support"""
# Initialize MCP client
mcp_client = MCPClient()
persona = get_persona(data_dir)
# Get defaults from config if not provided
@@ -1001,35 +1272,49 @@ def conversation(
else:
model = "qwen3:latest" # fallback
# Create AI provider
# Create AI provider with MCP client
ai_provider = None
try:
ai_provider = create_ai_provider(provider=provider, model=model)
ai_provider = create_ai_provider(provider=provider, model=model, mcp_client=mcp_client)
console.print(f"[dim]Using {provider} with model {model}[/dim]")
except Exception as e:
console.print(f"[yellow]Warning: Could not create AI provider: {e}[/yellow]")
# MCP status
server_info = mcp_client.get_server_info()
if server_info["available"]:
console.print(f"[green]✓ MCP Server connected: {server_info['display_name']}[/green]")
console.print(f"[dim] URL: {server_info['base_url']} | Endpoints: {server_info['endpoints']}[/dim]")
else:
console.print(f"[yellow]⚠ MCP Server unavailable (running in local mode)[/yellow]")
# Welcome message
console.print(f"[cyan]Conversation with AI started. Type 'exit' or 'quit' to end.[/cyan]\n")
console.print(f"[cyan]Conversation with AI started. Type 'exit' or 'quit' to end.[/cyan]")
if server_info["available"]:
console.print(f"[dim]MCP commands: /memories, /search, /context, /relationship[/dim]\n")
else:
console.print()
# History for conversation mode
actual_data_dir = data_dir if data_dir else DEFAULT_DATA_DIR
history_file = actual_data_dir / "conversation_history.txt"
history = FileHistory(str(history_file))
# Custom completer for slash commands and phrases
# Custom completer for slash commands and phrases with MCP support
class ConversationCompleter(Completer):
def __init__(self):
self.slash_commands = ['/status', '/help', '/clear', '/exit', '/quit']
def __init__(self, mcp_available: bool = False):
self.basic_commands = ['/status', '/help', '/clear', '/exit', '/quit']
self.mcp_commands = ['/memories', '/search', '/context', '/relationship'] if mcp_available else []
self.phrases = ['こんにちは', '今日は', 'ありがとう', 'お疲れ様',
'どう思う?', 'どうですか?', '教えて', 'わかりました']
self.all_commands = self.basic_commands + self.mcp_commands
def get_completions(self, document, complete_event):
text = document.text_before_cursor
# If text starts with '/', complete slash commands
if text.startswith('/'):
for cmd in self.slash_commands:
for cmd in self.all_commands:
if cmd.startswith(text):
yield Completion(cmd, start_position=-len(text))
# For other text, complete phrases
@@ -1038,7 +1323,7 @@ def conversation(
if phrase.startswith(text):
yield Completion(phrase, start_position=-len(text))
completer = ConversationCompleter()
completer = ConversationCompleter(mcp_client.available)
while True:
try:
@@ -1076,6 +1361,12 @@ def conversation(
console.print(" /clear - Clear screen")
console.print(" /exit - End conversation")
console.print(" / - Show commands (same as /help)")
if mcp_client.available:
console.print(f"\n[cyan]MCP Commands:[/cyan]")
console.print(" /memories - Show recent memories")
console.print(" /search <keywords> - Search memories")
console.print(" /context <query> - Get contextual memories")
console.print(" /relationship - Show relationship via MCP")
console.print(" <message> - Chat with AI\n")
continue
@@ -1083,7 +1374,73 @@ def conversation(
console.clear()
continue
# Process interaction
# MCP Commands
elif user_input.lower() == '/memories' and mcp_client.available:
memories = asyncio.run(mcp_client.get_memories(limit=5))
if memories:
console.print(f"\n[cyan]Recent Memories (via MCP):[/cyan]")
for i, mem in enumerate(memories[:5], 1):
console.print(f" {i}. [{mem.get('level', 'unknown')}] {mem.get('content', '')[:100]}...")
console.print("")
else:
console.print("[yellow]No memories found[/yellow]")
continue
elif user_input.lower().startswith('/search ') and mcp_client.available:
query = user_input[8:].strip()
if query:
keywords = query.split()
results = asyncio.run(mcp_client.search_memories(keywords))
if results:
console.print(f"\n[cyan]Memory Search Results for '{query}' (via MCP):[/cyan]")
for i, mem in enumerate(results[:5], 1):
console.print(f" {i}. {mem.get('content', '')[:100]}...")
console.print("")
else:
console.print(f"[yellow]No memories found for '{query}'[/yellow]")
else:
console.print("[red]Usage: /search <keywords>[/red]")
continue
elif user_input.lower().startswith('/context ') and mcp_client.available:
query = user_input[9:].strip()
if query:
results = asyncio.run(mcp_client.get_contextual_memories(query, limit=5))
if results:
console.print(f"\n[cyan]Contextual Memories for '{query}' (via MCP):[/cyan]")
for i, mem in enumerate(results[:5], 1):
console.print(f" {i}. {mem.get('content', '')[:100]}...")
console.print("")
else:
console.print(f"[yellow]No contextual memories found for '{query}'[/yellow]")
else:
console.print("[red]Usage: /context <query>[/red]")
continue
elif user_input.lower() == '/relationship' and mcp_client.available:
rel_data = asyncio.run(mcp_client.get_relationship(user_id))
if rel_data:
console.print(f"\n[cyan]Relationship (via MCP):[/cyan]")
console.print(f"Status: {rel_data.get('status', 'unknown')}")
console.print(f"Score: {rel_data.get('score', 0):.2f}")
console.print(f"Interactions: {rel_data.get('total_interactions', 0)}")
console.print("")
else:
console.print("[yellow]No relationship data found[/yellow]")
continue
# Process interaction - try MCP first, fallback to local
if mcp_client.available:
try:
mcp_result = asyncio.run(mcp_client.process_interaction(user_id, user_input))
if mcp_result and 'response' in mcp_result:
response = mcp_result['response']
console.print(f"AI> {response} [dim](via MCP)[/dim]\n")
continue
except Exception as e:
console.print(f"[yellow]MCP failed, using local: {e}[/yellow]")
# Fallback to local processing
response, relationship_delta = persona.process_interaction(user_id, user_input, ai_provider)
# Simple AI response display (no Panel, no extra info)

View File

@@ -45,7 +45,44 @@ class Config:
},
"ollama": {
"host": "http://localhost:11434",
"default_model": "qwen2.5"
"default_model": "qwen3:latest"
}
},
"mcp": {
"enabled": True,
"auto_detect": True,
"servers": {
"ai_gpt": {
"name": "ai.gpt MCP Server",
"base_url": "http://localhost:8001",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"process_interaction": "/process_interaction",
"get_relationship": "/get_relationship",
"get_all_relationships": "/get_all_relationships",
"get_persona_state": "/get_persona_state",
"get_fortune": "/get_fortune",
"run_maintenance": "/run_maintenance",
"execute_command": "/execute_command",
"analyze_file": "/analyze_file",
"remote_shell": "/remote_shell",
"ai_bot_status": "/ai_bot_status"
},
"timeout": 10.0
},
"ai_card": {
"name": "ai.card MCP Server",
"base_url": "http://localhost:8000",
"endpoints": {
"health": "/health",
"get_user_cards": "/api/cards/user",
"gacha": "/api/gacha",
"sync_atproto": "/api/sync"
},
"timeout": 5.0
}
}
},
"atproto": {

View File

@@ -160,6 +160,11 @@ AI:"""
# Generate response using AI with full context
try:
# Check if AI provider supports MCP
if hasattr(ai_provider, 'chat_with_mcp'):
import asyncio
response = asyncio.run(ai_provider.chat_with_mcp(context_prompt, max_tokens=2000, user_id=user_id))
else:
response = ai_provider.chat(context_prompt, max_tokens=2000)
# Clean up response if it includes the prompt echo