fix cli
37
python_backup/pyproject.toml
Normal file
@@ -0,0 +1,37 @@
[project]
name = "aigpt"
version = "0.1.0"
description = "Autonomous transmission AI with unique personality based on relationship parameters"
requires-python = ">=3.10"
dependencies = [
    "click>=8.0.0",
    "typer>=0.9.0",
    "fastapi-mcp>=0.1.0",
    "pydantic>=2.0.0",
    "httpx>=0.24.0",
    "rich>=13.0.0",
    "python-dotenv>=1.0.0",
    "ollama>=0.1.0",
    "openai>=1.0.0",
    "uvicorn>=0.23.0",
    "apscheduler>=3.10.0",
    "croniter>=1.3.0",
    "prompt-toolkit>=3.0.0",
    # Documentation management
    "jinja2>=3.0.0",
    "gitpython>=3.1.0",
    "pathlib-extensions>=0.1.0",
]

[project.scripts]
aigpt = "aigpt.cli:app"

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-data]
aigpt = ["data/*.json"]
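The `[project.scripts]` table wires the `aigpt` command to the Typer app object in `aigpt.cli`. A minimal sketch of what the installed console script resolves to at runtime (illustrative only, not part of this commit):

# Rough equivalent of the launcher that `pip install -e .` generates for
# the entry point `aigpt = "aigpt.cli:app"`: import the module, call the object.
from aigpt.cli import app  # the Typer application named in [project.scripts]

if __name__ == "__main__":
    app()  # Typer apps are callable; this parses sys.argv and dispatches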
23
python_backup/setup_venv.sh
Executable file
@@ -0,0 +1,23 @@
#!/bin/zsh
# Setup Python virtual environment in the new config directory

VENV_DIR="$HOME/.config/syui/ai/gpt/venv"

echo "Creating Python virtual environment at: $VENV_DIR"
python -m venv "$VENV_DIR"

echo "Activating virtual environment..."
source "$VENV_DIR/bin/activate"

echo "Installing aigpt package..."
cd "$(dirname "$0")"
pip install -e .

echo "Setup complete!"
echo "To activate the virtual environment, run:"
echo "source ~/.config/syui/ai/gpt/venv/bin/activate"

if [ -z "$($SHELL -i -c "alias aigpt")" ]; then
    echo 'alias aigpt="$HOME/.config/syui/ai/gpt/venv/bin/aigpt"' >> "${HOME}/.$(basename "$SHELL")rc"
    exec "$SHELL"
fi
21
python_backup/src/aigpt.egg-info/PKG-INFO
Normal file
@@ -0,0 +1,21 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0
Requires-Dist: prompt-toolkit>=3.0.0
Requires-Dist: jinja2>=3.0.0
Requires-Dist: gitpython>=3.1.0
Requires-Dist: pathlib-extensions>=0.1.0
34
python_backup/src/aigpt.egg-info/SOURCES.txt
Normal file
@@ -0,0 +1,34 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/chatgpt_importer.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/mcp_server_simple.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/project_manager.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt
src/aigpt/commands/docs.py
src/aigpt/commands/submodules.py
src/aigpt/commands/tokens.py
src/aigpt/docs/__init__.py
src/aigpt/docs/config.py
src/aigpt/docs/git_utils.py
src/aigpt/docs/templates.py
src/aigpt/docs/utils.py
src/aigpt/docs/wiki_generator.py
src/aigpt/shared/__init__.py
src/aigpt/shared/ai_provider.py
1
python_backup/src/aigpt.egg-info/dependency_links.txt
Normal file
@@ -0,0 +1 @@

2
python_backup/src/aigpt.egg-info/entry_points.txt
Normal file
@@ -0,0 +1,2 @@
[console_scripts]
aigpt = aigpt.cli:app
16
python_backup/src/aigpt.egg-info/requires.txt
Normal file
@@ -0,0 +1,16 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0
prompt-toolkit>=3.0.0
jinja2>=3.0.0
gitpython>=3.1.0
pathlib-extensions>=0.1.0
1
python_backup/src/aigpt.egg-info/top_level.txt
Normal file
@@ -0,0 +1 @@
aigpt
15
python_backup/src/aigpt/__init__.py
Normal file
@@ -0,0 +1,15 @@
"""ai.gpt - Autonomous transmission AI with unique personality"""

__version__ = "0.1.0"

from .memory import MemoryManager
from .relationship import RelationshipTracker
from .persona import Persona
from .transmission import TransmissionController

__all__ = [
    "MemoryManager",
    "RelationshipTracker",
    "Persona",
    "TransmissionController",
]
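With these re-exports, callers import the main classes from the package root. A hedged usage sketch (the one-argument constructors match how chatgpt_importer.py below calls them; the data directory path is an assumption based on the setup script's config location):

from pathlib import Path

from aigpt import MemoryManager, RelationshipTracker

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"  # assumed location
memory_manager = MemoryManager(data_dir)
relationship_tracker = RelationshipTracker(data_dir)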
580
python_backup/src/aigpt/ai_provider.py
Normal file
@@ -0,0 +1,580 @@
"""AI Provider integration for response generation"""

import os
import json
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
import httpx
from openai import OpenAI
import ollama

from .models import PersonaState, Memory
from .config import Config


class AIProvider(Protocol):
    """Protocol for AI providers"""

    @abstractmethod
    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate a response based on prompt and context"""
        pass


class OllamaProvider:
    """Ollama AI provider"""

    def __init__(self, model: str = "qwen2.5", host: Optional[str] = None):
        self.model = model
        # Use environment variable OLLAMA_HOST if available, otherwise use config or default
        self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
        # Ensure proper URL format
        if not self.host.startswith('http'):
            self.host = f'http://{self.host}'
        self.client = ollama.Client(host=self.host, timeout=60.0)  # 60-second timeout
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")

        # Load system prompt from config
        try:
            config = Config()
            self.config_system_prompt = config.get('providers.ollama.system_prompt')
        except Exception:
            self.config_system_prompt = None

    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate response using Ollama"""

        # Build context from memories
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.content[:200]}..."
            for mem in memories[:5]
        ])

        # Build personality context
        personality_desc = ", ".join([
            f"{trait}: {value:.1f}"
            for trait, value in persona_state.base_personality.items()
        ])

        # System prompt with persona context
        full_system_prompt = f"""You are an AI with the following characteristics:
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}

Recent memories:
{memory_context}

{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories.'}"""

        try:
            response = self.client.chat(
                model=self.model,
                messages=[
                    {"role": "system", "content": full_system_prompt},
                    {"role": "user", "content": prompt}
                ]
            )
            return self._clean_response(response['message']['content'])
        except Exception as e:
            self.logger.error(f"Ollama generation failed: {e}")
            return self._fallback_response(persona_state)

    def chat(self, prompt: str, max_tokens: int = 2000) -> str:
        """Simple chat interface"""
        try:
            messages = []
            if self.config_system_prompt:
                messages.append({"role": "system", "content": self.config_system_prompt})
            messages.append({"role": "user", "content": prompt})

            response = self.client.chat(
                model=self.model,
                messages=messages,
                options={
                    "num_predict": max_tokens,
                    "temperature": 0.7,
                    "top_p": 0.9,
                },
                stream=False  # Disable streaming for better stability
            )
            return self._clean_response(response['message']['content'])
        except Exception as e:
            self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
            return "I'm having trouble connecting to the AI model."

    def _clean_response(self, response: str) -> str:
        """Clean response by removing think tags and other unwanted content"""
        import re
        # Remove <think></think> tags and their content
        response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
        # Remove any remaining whitespace at the beginning/end
        response = response.strip()
        return response

    def _fallback_response(self, persona_state: PersonaState) -> str:
        """Fallback response based on mood"""
        mood_responses = {
            "joyful": "That's wonderful! I'm feeling great today!",
            "cheerful": "That sounds nice!",
            "neutral": "I understand.",
            "melancholic": "I see... That's something to think about.",
            "contemplative": "Hmm, let me consider that..."
        }
        return mood_responses.get(persona_state.current_mood, "I see.")


class OpenAIProvider:
    """OpenAI API provider with MCP function calling support"""

    def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, mcp_client=None):
        self.model = model
        # Try to get API key from config first
        config = Config()
        self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
        self.client = OpenAI(api_key=self.api_key)
        self.logger = logging.getLogger(__name__)
        self.mcp_client = mcp_client  # For MCP function calling

        # Load system prompt from config
        try:
            self.config_system_prompt = config.get('providers.openai.system_prompt')
        except Exception:
            self.config_system_prompt = None

    def _get_mcp_tools(self) -> List[Dict[str, Any]]:
        """Generate OpenAI tools from MCP endpoints"""
        if not self.mcp_client or not self.mcp_client.available:
            return []

        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_memories",
                    "description": "Retrieve past conversation memories. Always use this for questions containing words like 'remember', 'last time', or 'before'",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "limit": {
                                "type": "integer",
                                "description": "Number of memories to retrieve",
                                "default": 5
                            }
                        }
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "search_memories",
                    "description": "Search memories about a specific topic. Use this for questions like 'about programming' or 'we talked about X'",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "keywords": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "Array of search keywords"
                            }
                        },
                        "required": ["keywords"]
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "get_contextual_memories",
                    "description": "Retrieve contextual memories related to a query",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "Search query"
                            },
                            "limit": {
                                "type": "integer",
                                "description": "Number of memories to retrieve",
                                "default": 5
                            }
                        },
                        "required": ["query"]
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "get_relationship",
                    "description": "Retrieve relationship information for a specific user",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "user_id": {
                                "type": "string",
                                "description": "User ID"
                            }
                        },
                        "required": ["user_id"]
                    }
                }
            }
        ]

        # Add ai.card tools if available
        if hasattr(self.mcp_client, 'has_card_tools') and self.mcp_client.has_card_tools:
            card_tools = [
                {
                    "type": "function",
                    "function": {
                        "name": "card_get_user_cards",
                        "description": "Retrieve the list of cards owned by a user",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "did": {
                                    "type": "string",
                                    "description": "The user's DID"
                                },
                                "limit": {
                                    "type": "integer",
                                    "description": "Maximum number of cards to retrieve",
                                    "default": 10
                                }
                            },
                            "required": ["did"]
                        }
                    }
                },
                {
                    "type": "function",
                    "function": {
                        "name": "card_draw_card",
                        "description": "Draw a gacha and obtain a card",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "did": {
                                    "type": "string",
                                    "description": "The user's DID"
                                },
                                "is_paid": {
                                    "type": "boolean",
                                    "description": "Whether this is a paid gacha draw",
                                    "default": False
                                }
                            },
                            "required": ["did"]
                        }
                    }
                },
                {
                    "type": "function",
                    "function": {
                        "name": "card_analyze_collection",
                        "description": "Analyze the user's card collection",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "did": {
                                    "type": "string",
                                    "description": "The user's DID"
                                }
                            },
                            "required": ["did"]
                        }
                    }
                },
                {
                    "type": "function",
                    "function": {
                        "name": "card_get_gacha_stats",
                        "description": "Retrieve gacha statistics",
                        "parameters": {
                            "type": "object",
                            "properties": {}
                        }
                    }
                }
            ]
            tools.extend(card_tools)

        return tools

    async def generate_response(
        self,
        prompt: str,
        persona_state: PersonaState,
        memories: List[Memory],
        system_prompt: Optional[str] = None
    ) -> str:
        """Generate response using OpenAI"""

        # Build context similar to Ollama
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.content[:200]}..."
            for mem in memories[:5]
        ])

        personality_desc = ", ".join([
            f"{trait}: {value:.1f}"
            for trait, value in persona_state.base_personality.items()
        ])

        full_system_prompt = f"""You are an AI with unique personality traits and memories.
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}

Recent memories:
{memory_context}

{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": full_system_prompt},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05  # Vary by fortune
            )
            return response.choices[0].message.content
        except Exception as e:
            self.logger.error(f"OpenAI generation failed: {e}")
            return self._fallback_response(persona_state)

    async def chat_with_mcp(self, prompt: str, max_tokens: int = 2000, user_id: str = "user") -> str:
        """Chat interface with MCP function calling support"""
        if not self.mcp_client or not self.mcp_client.available:
            return self.chat(prompt, max_tokens)

        try:
            # Prepare tools
            tools = self._get_mcp_tools()

            # Initial request with tools
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": self.config_system_prompt or "You have access to a memory system, relationship data, and a card game system. When asked about past conversations, memories, or relationships, always use the tools to retrieve accurate information. Use the tools proactively whenever keywords such as 'remember', 'last time', 'before', 'we talked about', or 'relationship' appear. For card-related questions ('card', 'collection', 'gacha', 'show me', 'do I have', etc.), always use tools such as card_get_user_cards or card_analyze_collection. For the did parameter, use the ID of the user you are currently talking to (e.g. 'syui')."},
                    {"role": "user", "content": prompt}
                ],
                tools=tools,
                tool_choice="auto",
                max_tokens=max_tokens,
                temperature=0.7
            )

            message = response.choices[0].message

            # Handle tool calls
            if message.tool_calls:
                print(f"🔧 [OpenAI] {len(message.tool_calls)} tools called:")
                for tc in message.tool_calls:
                    print(f"  - {tc.function.name}({tc.function.arguments})")

                messages = [
                    {"role": "system", "content": self.config_system_prompt or "Use the available tools as needed to provide more accurate and detailed answers."},
                    {"role": "user", "content": prompt},
                    {
                        "role": "assistant",
                        "content": message.content,
                        "tool_calls": [tc.model_dump() for tc in message.tool_calls]
                    }
                ]

                # Execute each tool call
                for tool_call in message.tool_calls:
                    print(f"🌐 [MCP] Executing {tool_call.function.name}...")
                    tool_result = await self._execute_mcp_tool(tool_call, user_id)
                    print(f"✅ [MCP] Result: {str(tool_result)[:100]}...")
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": tool_call.function.name,
                        "content": json.dumps(tool_result, ensure_ascii=False)
                    })

                # Get final response with tool outputs
                final_response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=0.7
                )

                return final_response.choices[0].message.content
            else:
                return message.content

        except Exception as e:
            self.logger.error(f"OpenAI MCP chat failed: {e}")
            return f"Sorry, an error occurred: {e}"

    async def _execute_mcp_tool(self, tool_call, context_user_id: str = "user") -> Dict[str, Any]:
        """Execute MCP tool call"""
        try:
            import json
            function_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)

            if function_name == "get_memories":
                limit = arguments.get("limit", 5)
                return await self.mcp_client.get_memories(limit) or {"error": "Failed to retrieve memories"}

            elif function_name == "search_memories":
                keywords = arguments.get("keywords", [])
                return await self.mcp_client.search_memories(keywords) or {"error": "Failed to search memories"}

            elif function_name == "get_contextual_memories":
                query = arguments.get("query", "")
                limit = arguments.get("limit", 5)
                return await self.mcp_client.get_contextual_memories(query, limit) or {"error": "Failed to retrieve contextual memories"}

            elif function_name == "get_relationship":
                # If no user_id argument was given, fall back to the context user
                user_id = arguments.get("user_id", context_user_id)
                if not user_id or user_id == "user":
                    user_id = context_user_id
                # Debug logging
                print(f"🔍 [DEBUG] get_relationship called with user_id: '{user_id}' (context: '{context_user_id}')")
                result = await self.mcp_client.get_relationship(user_id)
                print(f"🔍 [DEBUG] MCP result: {result}")
                return result or {"error": "Failed to retrieve relationship"}

            # ai.card tools
            elif function_name == "card_get_user_cards":
                did = arguments.get("did", context_user_id)
                limit = arguments.get("limit", 10)
                result = await self.mcp_client.card_get_user_cards(did, limit)
                # Check if ai.card server is not running
                if result and result.get("error") == "ai.card server is not running":
                    return {
                        "error": "The ai.card server is not running",
                        "message": "To use the card system, run the following commands in another terminal:\ncd card && ./start_server.sh"
                    }
                return result or {"error": "Failed to retrieve card list"}

            elif function_name == "card_draw_card":
                did = arguments.get("did", context_user_id)
                is_paid = arguments.get("is_paid", False)
                result = await self.mcp_client.card_draw_card(did, is_paid)
                if result and result.get("error") == "ai.card server is not running":
                    return {
                        "error": "The ai.card server is not running",
                        "message": "To use the card system, run the following commands in another terminal:\ncd card && ./start_server.sh"
                    }
                return result or {"error": "Gacha draw failed"}

            elif function_name == "card_analyze_collection":
                did = arguments.get("did", context_user_id)
                result = await self.mcp_client.card_analyze_collection(did)
                if result and result.get("error") == "ai.card server is not running":
                    return {
                        "error": "The ai.card server is not running",
                        "message": "To use the card system, run the following commands in another terminal:\ncd card && ./start_server.sh"
                    }
                return result or {"error": "Collection analysis failed"}

            elif function_name == "card_get_gacha_stats":
                result = await self.mcp_client.card_get_gacha_stats()
                if result and result.get("error") == "ai.card server is not running":
                    return {
                        "error": "The ai.card server is not running",
                        "message": "To use the card system, run the following commands in another terminal:\ncd card && ./start_server.sh"
                    }
                return result or {"error": "Failed to retrieve gacha statistics"}

            else:
                return {"error": f"Unknown tool: {function_name}"}

        except Exception as e:
            return {"error": f"Tool execution error: {str(e)}"}

    def chat(self, prompt: str, max_tokens: int = 2000) -> str:
        """Simple chat interface without MCP tools"""
        try:
            messages = []
            if self.config_system_prompt:
                messages.append({"role": "system", "content": self.config_system_prompt})
            messages.append({"role": "user", "content": prompt})

            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.7
            )
            return response.choices[0].message.content
        except Exception as e:
            self.logger.error(f"OpenAI chat failed: {e}")
            return "I'm having trouble connecting to the AI model."

    def _fallback_response(self, persona_state: PersonaState) -> str:
        """Fallback response based on mood"""
        mood_responses = {
            "joyful": "What a delightful conversation!",
            "cheerful": "That's interesting!",
            "neutral": "I understand what you mean.",
            "melancholic": "I've been thinking about that too...",
            "contemplative": "That gives me something to ponder..."
        }
        return mood_responses.get(persona_state.current_mood, "I see.")


def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
    """Factory function to create AI providers"""
    if provider == "ollama":
        # Get model from config if not provided
        if model is None:
            try:
                from .config import Config
                config = Config()
                model = config.get('providers.ollama.default_model', 'qwen2.5')
            except Exception:
                model = 'qwen2.5'  # Fallback to default

        # Try to get host from config if not provided in kwargs
        if 'host' not in kwargs:
            try:
                from .config import Config
                config = Config()
                config_host = config.get('providers.ollama.host')
                if config_host:
                    kwargs['host'] = config_host
            except Exception:
                pass  # Use environment variable or default
        return OllamaProvider(model=model, **kwargs)
    elif provider == "openai":
        # Get model from config if not provided
        if model is None:
            try:
                from .config import Config
                config = Config()
                model = config.get('providers.openai.default_model', 'gpt-4o-mini')
            except Exception:
                model = 'gpt-4o-mini'  # Fallback to default
        return OpenAIProvider(model=model, mcp_client=mcp_client, **kwargs)
    else:
        raise ValueError(f"Unknown provider: {provider}")
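A short usage sketch for the factory above (not part of this commit; assumes a local Ollama server at the default host, and `my_mcp_client` is a hypothetical object exposing `.available` plus the get_*/card_* coroutines the providers call):

import asyncio

from aigpt.ai_provider import create_ai_provider

# Ollama path: model and host fall back to config, then to the defaults above.
provider = create_ai_provider(provider="ollama", model="qwen2.5")
print(provider.chat("Hello!", max_tokens=200))

# OpenAI path with MCP tools (needs an API key in config or OPENAI_API_KEY):
# openai_provider = create_ai_provider(provider="openai", mcp_client=my_mcp_client)
# print(asyncio.run(openai_provider.chat_with_mcp("Do you remember me?", user_id="syui")))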
192
python_backup/src/aigpt/chatgpt_importer.py
Normal file
@@ -0,0 +1,192 @@
"""ChatGPT conversation data importer for ai.gpt"""

import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging

from .models import Memory, MemoryLevel, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker

logger = logging.getLogger(__name__)


class ChatGPTImporter:
    """Import ChatGPT conversation data into ai.gpt memory system"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.memory_manager = MemoryManager(data_dir)
        self.relationship_tracker = RelationshipTracker(data_dir)

    def import_from_file(self, file_path: Path, user_id: str = "chatgpt_user") -> Dict[str, Any]:
        """Import ChatGPT conversations from JSON file

        Args:
            file_path: Path to ChatGPT export JSON file
            user_id: User ID to associate with imported conversations

        Returns:
            Dict with import statistics
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                chatgpt_data = json.load(f)

            return self._import_conversations(chatgpt_data, user_id)

        except Exception as e:
            logger.error(f"Failed to import ChatGPT data: {e}")
            raise

    def _import_conversations(self, chatgpt_data: List[Dict], user_id: str) -> Dict[str, Any]:
        """Import multiple conversations from ChatGPT data"""
        stats = {
            "conversations_imported": 0,
            "messages_imported": 0,
            "user_messages": 0,
            "assistant_messages": 0,
            "skipped_messages": 0
        }

        for conversation_data in chatgpt_data:
            try:
                conv_stats = self._import_single_conversation(conversation_data, user_id)

                # Update overall stats
                stats["conversations_imported"] += 1
                stats["messages_imported"] += conv_stats["messages"]
                stats["user_messages"] += conv_stats["user_messages"]
                stats["assistant_messages"] += conv_stats["assistant_messages"]
                stats["skipped_messages"] += conv_stats["skipped"]

            except Exception as e:
                logger.warning(f"Failed to import conversation '{conversation_data.get('title', 'Unknown')}': {e}")
                continue

        logger.info(f"Import completed: {stats}")
        return stats

    def _import_single_conversation(self, conversation_data: Dict, user_id: str) -> Dict[str, int]:
        """Import a single conversation from ChatGPT"""
        title = conversation_data.get("title", "Untitled")
        create_time = conversation_data.get("create_time")
        mapping = conversation_data.get("mapping", {})

        stats = {"messages": 0, "user_messages": 0, "assistant_messages": 0, "skipped": 0}

        # Extract messages in chronological order
        messages = self._extract_messages_from_mapping(mapping)

        for msg in messages:
            try:
                role = msg["author"]["role"]
                content = self._extract_content(msg["content"])
                create_time_msg = msg.get("create_time")

                if not content or role not in ["user", "assistant"]:
                    stats["skipped"] += 1
                    continue

                # Convert to ai.gpt format
                if role == "user":
                    # User message - create memory entry
                    self._add_user_message(user_id, content, create_time_msg, title)
                    stats["user_messages"] += 1

                elif role == "assistant":
                    # Assistant message - create AI response memory
                    self._add_assistant_message(user_id, content, create_time_msg, title)
                    stats["assistant_messages"] += 1

                stats["messages"] += 1

            except Exception as e:
                logger.warning(f"Failed to process message in '{title}': {e}")
                stats["skipped"] += 1
                continue

        logger.info(f"Imported conversation '{title}': {stats}")
        return stats

    def _extract_messages_from_mapping(self, mapping: Dict) -> List[Dict]:
        """Extract messages from ChatGPT mapping structure in chronological order"""
        messages = []

        for node_id, node_data in mapping.items():
            message = node_data.get("message")
            if message and message.get("author", {}).get("role") in ["user", "assistant"]:
                # Skip system messages and hidden messages
                metadata = message.get("metadata", {})
                if not metadata.get("is_visually_hidden_from_conversation", False):
                    messages.append(message)

        # Sort by create_time if available
        messages.sort(key=lambda x: x.get("create_time") or 0)
        return messages

    def _extract_content(self, content_data: Dict) -> Optional[str]:
        """Extract text content from ChatGPT content structure"""
        if not content_data:
            return None

        content_type = content_data.get("content_type")

        if content_type == "text":
            parts = content_data.get("parts", [])
            if parts and parts[0]:
                return parts[0].strip()

        elif content_type == "user_editable_context":
            # User context/instructions
            user_instructions = content_data.get("user_instructions", "")
            if user_instructions:
                return f"[User Context] {user_instructions}"

        return None

    def _add_user_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
        """Add user message to ai.gpt memory system"""
        timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()

        # Create conversation record
        conversation = Conversation(
            id=str(uuid.uuid4()),
            user_id=user_id,
            user_message=content,
            ai_response="",  # Will be filled by next assistant message
            timestamp=timestamp,
            context={"source": "chatgpt_import", "conversation_title": conversation_title}
        )

        # Add to memory with CORE level (imported data is important)
        memory = Memory(
            id=str(uuid.uuid4()),
            timestamp=timestamp,
            content=content,
            level=MemoryLevel.CORE,
            importance_score=0.8  # High importance for imported data
        )

        self.memory_manager.add_memory(memory)

        # Update relationship (positive interaction)
        self.relationship_tracker.update_interaction(user_id, 1.0)

    def _add_assistant_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
        """Add assistant message to ai.gpt memory system"""
        timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()

        # Add assistant response as memory (AI's own responses can inform future behavior)
        memory = Memory(
            id=str(uuid.uuid4()),
            timestamp=timestamp,
            content=f"[AI Response] {content}",
            level=MemoryLevel.SUMMARY,
            importance_score=0.6  # Medium importance for AI responses
        )

        self.memory_manager.add_memory(memory)
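A minimal sketch of driving the importer above (not part of this commit; the export path is a placeholder for the `conversations.json` that ChatGPT's data export produces, and the data directory is an assumption based on the setup script's config location):

from pathlib import Path

from aigpt.chatgpt_importer import ChatGPTImporter

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"  # assumed data directory
importer = ChatGPTImporter(data_dir)
stats = importer.import_from_file(Path("~/Downloads/conversations.json").expanduser(), user_id="syui")
print(stats)  # e.g. {'conversations_imported': ..., 'messages_imported': ..., ...}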
1596
python_backup/src/aigpt/cli.py
Normal file
File diff suppressed because it is too large
729
python_backup/src/aigpt/commands/docs.py
Normal file
@@ -0,0 +1,729 @@
"""Documentation management commands for ai.gpt."""

from pathlib import Path
from typing import Dict, List, Optional

import typer
from rich.console import Console
from rich.panel import Panel
from rich.progress import track
from rich.table import Table

from ..docs.config import get_ai_root, load_docs_config
from ..docs.templates import DocumentationTemplateManager
from ..docs.git_utils import ensure_submodules_available
from ..docs.wiki_generator import WikiGenerator
from ..docs.utils import (
    ProgressManager,
    count_lines,
    find_project_directories,
    format_file_size,
    safe_write_file,
    validate_project_name,
)

console = Console()
docs_app = typer.Typer(help="Documentation management for AI ecosystem")


@docs_app.command("generate")
def generate_docs(
    project: str = typer.Option(..., "--project", "-p", help="Project name (os, gpt, card, etc.)"),
    output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"),
    include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"),
    dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
    auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"),
    ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be generated without writing files"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
    """Generate project documentation with Claude AI integration.

    Creates comprehensive documentation by combining core philosophy,
    architecture, and project-specific content. Supports ai.gpt
    integration for enhanced documentation generation.

    Examples:

        # Generate basic documentation
        aigpt docs generate --project=os

        # Generate with custom directory
        aigpt docs generate --project=gpt --dir ~/ai/ai

        # Generate without auto-pulling missing submodules
        aigpt docs generate --project=card --no-auto-pull

        # Generate with ai.gpt integration
        aigpt docs generate --project=card --ai-gpt-integration

        # Preview without writing
        aigpt docs generate --project=verse --dry-run
    """
    try:
        # Load configuration
        with ProgressManager("Loading configuration...") as progress:
            config = load_docs_config(dir)
            ai_root = get_ai_root(dir)

        # Ensure submodules are available
        if auto_pull:
            with ProgressManager("Checking submodules...") as progress:
                success, errors = ensure_submodules_available(ai_root, config, auto_clone=True)
                if not success:
                    console.print(f"[red]Submodule errors: {errors}[/red]")
                    if not typer.confirm("Continue anyway?"):
                        raise typer.Abort()

        # Validate project
        available_projects = config.list_projects()
        if not validate_project_name(project, available_projects):
            console.print(f"[red]Error: Project '{project}' not found[/red]")
            console.print(f"Available projects: {', '.join(available_projects)}")
            raise typer.Abort()

        # Parse components
        components = [c.strip() for c in include.split(",")]

        # Initialize template manager
        template_manager = DocumentationTemplateManager(config)

        # Validate components
        valid_components = template_manager.validate_components(components)
        if valid_components != components:
            console.print("[yellow]Some components were invalid and filtered out[/yellow]")

        # Show generation info
        project_info = config.get_project_info(project)

        info_table = Table(title=f"Documentation Generation: {project}")
        info_table.add_column("Property", style="cyan")
        info_table.add_column("Value", style="green")

        info_table.add_row("Project Type", project_info.type if project_info else "Unknown")
        info_table.add_row("Status", project_info.status if project_info else "Unknown")
        info_table.add_row("Output Path", str(output))
        info_table.add_row("Components", ", ".join(valid_components))
        info_table.add_row("AI.GPT Integration", "✓" if ai_gpt_integration else "✗")
        info_table.add_row("Mode", "Dry Run" if dry_run else "Generate")

        console.print(info_table)
        console.print()

        # AI.GPT integration
        if ai_gpt_integration:
            console.print("[blue]🤖 AI.GPT Integration enabled[/blue]")
            try:
                enhanced_content = _integrate_with_ai_gpt(project, valid_components, verbose)
                if enhanced_content:
                    console.print("[green]✓ AI.GPT enhancement applied[/green]")
                else:
                    console.print("[yellow]⚠ AI.GPT enhancement failed, using standard generation[/yellow]")
            except Exception as e:
                console.print(f"[yellow]⚠ AI.GPT integration error: {e}[/yellow]")
                console.print("[dim]Falling back to standard generation[/dim]")

        # Generate documentation
        with ProgressManager("Generating documentation...") as progress:
            content = template_manager.generate_documentation(
                project_name=project,
                components=valid_components,
                output_path=None if dry_run else output,
            )

        # Show results
        if dry_run:
            console.print(Panel(
                f"[dim]Preview of generated content ({len(content.splitlines())} lines)[/dim]\n\n" +
                content[:500] + "\n\n[dim]... (truncated)[/dim]",
                title="Dry Run Preview",
                expand=False,
            ))
            console.print(f"[yellow]🔍 Dry run completed. Would write to: {output}[/yellow]")
        else:
            # Write content if not dry run
            if safe_write_file(output, content):
                file_size = output.stat().st_size
                line_count = count_lines(output)

                console.print(f"[green]✅ Generated: {output}[/green]")
                console.print(f"[dim]📏 Size: {format_file_size(file_size)} ({line_count} lines)[/dim]")

                # Show component breakdown
                if verbose:
                    console.print("\n[blue]📋 Component breakdown:[/blue]")
                    for component in valid_components:
                        component_display = component.replace("_", " ").title()
                        console.print(f"  • {component_display}")
            else:
                console.print("[red]❌ Failed to write documentation[/red]")
                raise typer.Abort()

    except Exception as e:
        if verbose:
            console.print_exception()
        else:
            console.print(f"[red]Error: {e}[/red]")
        raise typer.Abort()


@docs_app.command("sync")
def sync_docs(
    project: Optional[str] = typer.Option(None, "--project", "-p", help="Sync specific project"),
    sync_all: bool = typer.Option(False, "--all", "-a", help="Sync all available projects"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"),
    include: str = typer.Option("core,specific", "--include", "-i", help="Components to include in sync"),
    dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
    auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"),
    ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
    """Sync documentation across multiple projects.

    Synchronizes Claude documentation from the central claude/ directory
    to individual project directories. Supports both single-project and
    bulk synchronization operations.

    Examples:

        # Sync specific project
        aigpt docs sync --project=os

        # Sync all projects with custom directory
        aigpt docs sync --all --dir ~/ai/ai

        # Preview sync operations
        aigpt docs sync --all --dry-run

        # Sync without auto-pulling submodules
        aigpt docs sync --project=gpt --no-auto-pull
    """
    # Validate arguments
    if not project and not sync_all:
        console.print("[red]Error: Either --project or --all is required[/red]")
        raise typer.Abort()

    if project and sync_all:
        console.print("[red]Error: Cannot use both --project and --all[/red]")
        raise typer.Abort()

    try:
        # Load configuration
        with ProgressManager("Loading configuration...") as progress:
            config = load_docs_config(dir)
            ai_root = get_ai_root(dir)

        # Ensure submodules are available
        if auto_pull:
            with ProgressManager("Checking submodules...") as progress:
                success, errors = ensure_submodules_available(ai_root, config, auto_clone=True)
                if not success:
                    console.print(f"[red]Submodule errors: {errors}[/red]")
                    if not typer.confirm("Continue anyway?"):
                        raise typer.Abort()

        available_projects = config.list_projects()

        # Validate specific project if provided
        if project and not validate_project_name(project, available_projects):
            console.print(f"[red]Error: Project '{project}' not found[/red]")
            console.print(f"Available projects: {', '.join(available_projects)}")
            raise typer.Abort()

        # Determine projects to sync
        if sync_all:
            target_projects = available_projects
        else:
            target_projects = [project]

        # Find project directories
        project_dirs = find_project_directories(ai_root, target_projects)

        # Show sync information
        sync_table = Table(title="Documentation Sync Plan")
        sync_table.add_column("Project", style="cyan")
        sync_table.add_column("Directory", style="blue")
        sync_table.add_column("Status", style="green")
        sync_table.add_column("Components", style="yellow")

        for proj in target_projects:
            if proj in project_dirs:
                target_file = project_dirs[proj] / "claude.md"
                status = "✓ Found" if target_file.parent.exists() else "⚠ Missing"
                sync_table.add_row(proj, str(project_dirs[proj]), status, include)
            else:
                sync_table.add_row(proj, "Not found", "❌ Missing", "N/A")

        console.print(sync_table)
        console.print()

        if dry_run:
            console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]")

        # AI.GPT integration setup
        if ai_gpt_integration:
            console.print("[blue]🤖 AI.GPT Integration enabled[/blue]")
            console.print("[dim]Enhanced documentation generation will be applied[/dim]")
            console.print()

        # Perform sync operations
        sync_results = []

        for proj in track(target_projects, description="Syncing projects..."):
            result = _sync_project(
                proj,
                project_dirs.get(proj),
                include,
                dry_run,
                ai_gpt_integration,
                verbose
            )
            sync_results.append((proj, result))

        # Show results summary
        _show_sync_summary(sync_results, dry_run)

    except Exception as e:
        if verbose:
            console.print_exception()
        else:
            console.print(f"[red]Error: {e}[/red]")
        raise typer.Abort()


def _sync_project(
    project_name: str,
    project_dir: Optional[Path],
    include: str,
    dry_run: bool,
    ai_gpt_integration: bool,
    verbose: bool,
) -> Dict:
    """Sync a single project."""
    result = {
        "project": project_name,
        "success": False,
        "message": "",
        "output_file": None,
        "lines": 0,
    }

    if not project_dir:
        result["message"] = "Directory not found"
        return result

    if not project_dir.exists():
        result["message"] = f"Directory does not exist: {project_dir}"
        return result

    target_file = project_dir / "claude.md"

    if dry_run:
        result["success"] = True
        result["message"] = f"Would sync to {target_file}"
        result["output_file"] = target_file
        return result

    try:
        # Use the generate functionality
        config = load_docs_config()
        template_manager = DocumentationTemplateManager(config)

        # Generate documentation
        content = template_manager.generate_documentation(
            project_name=project_name,
            components=[c.strip() for c in include.split(",")],
            output_path=target_file,
        )

        result["success"] = True
        result["message"] = "Successfully synced"
        result["output_file"] = target_file
        result["lines"] = len(content.splitlines())

        if verbose:
            console.print(f"[dim]✓ Synced {project_name} → {target_file}[/dim]")

    except Exception as e:
        result["message"] = f"Sync failed: {str(e)}"
        if verbose:
            console.print(f"[red]✗ Failed {project_name}: {e}[/red]")

    return result


def _show_sync_summary(sync_results: List[tuple], dry_run: bool) -> None:
    """Show sync operation summary."""
    success_count = sum(1 for _, result in sync_results if result["success"])
    total_count = len(sync_results)
    error_count = total_count - success_count

    # Summary table
    summary_table = Table(title="Sync Summary")
    summary_table.add_column("Metric", style="cyan")
    summary_table.add_column("Value", style="green")

    summary_table.add_row("Total Projects", str(total_count))
    summary_table.add_row("Successful", str(success_count))
    summary_table.add_row("Failed", str(error_count))

    if not dry_run:
        total_lines = sum(result["lines"] for _, result in sync_results if result["success"])
        summary_table.add_row("Total Lines Generated", str(total_lines))

    console.print()
    console.print(summary_table)

    # Show errors if any
    if error_count > 0:
        console.print()
        console.print("[red]❌ Failed Projects:[/red]")
        for project_name, result in sync_results:
            if not result["success"]:
                console.print(f"  • {project_name}: {result['message']}")

    # Final status
    console.print()
    if dry_run:
        console.print("[yellow]🔍 This was a dry run. To apply changes, run without --dry-run[/yellow]")
    elif error_count == 0:
        console.print("[green]🎉 All projects synced successfully![/green]")
    else:
        console.print(f"[yellow]⚠ Completed with {error_count} error(s)[/yellow]")


def _integrate_with_ai_gpt(project: str, components: List[str], verbose: bool) -> Optional[str]:
    """Integrate with ai.gpt for enhanced documentation generation."""
    try:
        from ..ai_provider import create_ai_provider
        from ..persona import Persona
        from ..config import Config

        config = Config()
        ai_root = config.data_dir.parent if config.data_dir else Path.cwd()

        # Create AI provider
        provider = config.get("default_provider", "ollama")
        model = config.get(f"providers.{provider}.default_model", "qwen2.5")

        ai_provider = create_ai_provider(provider=provider, model=model)
        persona = Persona(config.data_dir)

        # Create enhancement prompt
        enhancement_prompt = f"""As an AI documentation expert, enhance the documentation for project '{project}'.

Project type: {project}
Components to include: {', '.join(components)}

Please provide:
1. Improved project description
2. Key features that should be highlighted
3. Usage examples
4. Integration points with other AI ecosystem projects
5. Development workflow recommendations

Focus on making the documentation more comprehensive and user-friendly."""

        if verbose:
            console.print("[dim]Generating AI-enhanced content...[/dim]")

        # Get AI response
        response, _ = persona.process_interaction(
            "docs_system",
            enhancement_prompt,
            ai_provider
        )

        if verbose:
            console.print("[green]✓ AI enhancement generated[/green]")

        return response

    except ImportError as e:
        if verbose:
            console.print(f"[yellow]AI integration unavailable: {e}[/yellow]")
        return None
    except Exception as e:
        if verbose:
            console.print(f"[red]AI integration error: {e}[/red]")
        return None


# Add aliases for convenience
@docs_app.command("gen")
def generate_docs_alias(
    project: str = typer.Option(..., "--project", "-p", help="Project name"),
    output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"),
    include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"),
    ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Preview mode"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
) -> None:
    """Alias for generate command."""
    # Forward by keyword so the arguments line up with generate_docs's full
    # signature (a positional call would pass ai_gpt_integration as dir).
    generate_docs(
        project=project,
        output=output,
        include=include,
        dir=None,
        auto_pull=True,
        ai_gpt_integration=ai_gpt_integration,
        dry_run=dry_run,
        verbose=verbose,
    )


@docs_app.command("wiki")
def wiki_management(
    action: str = typer.Option("update-auto", "--action", "-a", help="Action to perform (update-auto, build-home, status)"),
    dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
    auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Pull latest wiki changes before update"),
    ai_enhance: bool = typer.Option(False, "--ai-enhance", help="Use AI to enhance wiki content"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
    """Manage AI wiki generation and updates.

    Automatically generates wiki pages from project claude.md files
    and maintains the ai.wiki repository structure.

    Actions:
    - update-auto: Generate auto/ directory with project summaries
    - build-home: Rebuild Home.md from all projects
    - status: Show wiki repository status

    Examples:

        # Update auto-generated content (with auto-pull)
        aigpt docs wiki --action=update-auto

        # Update without pulling latest changes
        aigpt docs wiki --action=update-auto --no-auto-pull

        # Update with custom directory
        aigpt docs wiki --action=update-auto --dir ~/ai/ai

        # Preview what would be generated
        aigpt docs wiki --action=update-auto --dry-run

        # Check wiki status
        aigpt docs wiki --action=status
    """
    try:
        # Load configuration
        with ProgressManager("Loading configuration...") as progress:
            config = load_docs_config(dir)
            ai_root = get_ai_root(dir)

        # Initialize wiki generator
        wiki_generator = WikiGenerator(config, ai_root)

        if not wiki_generator.wiki_root:
            console.print("[red]❌ ai.wiki directory not found[/red]")
            console.print(f"Expected location: {ai_root / 'ai.wiki'}")
            console.print("Please ensure ai.wiki submodule is cloned")
            raise typer.Abort()

        # Show wiki information
        if verbose:
            console.print(f"[blue]📁 Wiki root: {wiki_generator.wiki_root}[/blue]")
            console.print(f"[blue]📁 AI root: {ai_root}[/blue]")

        if action == "status":
            _show_wiki_status(wiki_generator, ai_root)

        elif action == "update-auto":
            if dry_run:
                console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]")
                if auto_pull:
                    console.print("[blue]📥 Would pull latest wiki changes[/blue]")
                # Show what would be generated
                project_dirs = find_project_directories(ai_root, config.list_projects())
                console.print(f"[blue]📋 Would generate {len(project_dirs)} project pages:[/blue]")
                for project_name in project_dirs.keys():
                    console.print(f"  • auto/{project_name}.md")
                console.print("  • Home.md")
            else:
                with ProgressManager("Updating wiki auto directory...") as progress:
                    success, updated_files = wiki_generator.update_wiki_auto_directory(
                        auto_pull=auto_pull,
                        ai_enhance=ai_enhance
                    )

                if success:
                    console.print(f"[green]✅ Successfully updated {len(updated_files)} files[/green]")
                    if verbose:
                        for file in updated_files:
                            console.print(f"  • {file}")
                else:
                    console.print("[red]❌ Failed to update wiki[/red]")
                    raise typer.Abort()

        elif action == "build-home":
            console.print("[blue]🏠 Building Home.md...[/blue]")
            # This would be implemented to rebuild just Home.md
            console.print("[yellow]⚠ build-home action not yet implemented[/yellow]")

        else:
            console.print(f"[red]Unknown action: {action}[/red]")
            console.print("Available actions: update-auto, build-home, status")
            raise typer.Abort()

    except Exception as e:
        if verbose:
            console.print_exception()
        else:
            console.print(f"[red]Error: {e}[/red]")
        raise typer.Abort()


def _show_wiki_status(wiki_generator: WikiGenerator, ai_root: Path) -> None:
    """Show wiki repository status."""
    console.print("[blue]📊 AI Wiki Status[/blue]")

    # Check wiki directory structure
    wiki_root = wiki_generator.wiki_root
    status_table = Table(title="Wiki Directory Status")
    status_table.add_column("Directory", style="cyan")
    status_table.add_column("Status", style="green")
    status_table.add_column("Files", style="yellow")

    directories = ["auto", "claude", "manual"]
    for dir_name in directories:
        dir_path = wiki_root / dir_name
        if dir_path.exists():
            file_count = len(list(dir_path.glob("*.md")))
            status = "✓ Exists"
            files = f"{file_count} files"
        else:
            status = "❌ Missing"
            files = "N/A"

        status_table.add_row(dir_name, status, files)

    # Check Home.md
    home_path = wiki_root / "Home.md"
    home_status = "✓ Exists" if home_path.exists() else "❌ Missing"
    status_table.add_row("Home.md", home_status, "1 file" if home_path.exists() else "N/A")

    console.print(status_table)

    # Show project coverage
    config = wiki_generator.config
    project_dirs = find_project_directories(ai_root, config.list_projects())
    auto_dir = wiki_root / "auto"

    if auto_dir.exists():
        existing_wiki_files = set(f.stem for f in auto_dir.glob("*.md"))
        available_projects = set(project_dirs.keys())

        missing = available_projects - existing_wiki_files
        orphaned = existing_wiki_files - available_projects

        console.print("\n[blue]📋 Project Coverage:[/blue]")
        console.print(f"  • Total projects: {len(available_projects)}")
        console.print(f"  • Wiki pages: {len(existing_wiki_files)}")

        if missing:
            console.print(f"  • Missing wiki pages: {', '.join(missing)}")
        if orphaned:
            console.print(f"  • Orphaned wiki pages: {', '.join(orphaned)}")

        if not missing and not orphaned:
            console.print("  • ✅ All projects have wiki pages")


@docs_app.command("config")
def docs_config(
    action: str = typer.Option("show", "--action", "-a", help="Action (show, set-dir, clear-dir)"),
    value: Optional[str] = typer.Option(None, "--value", "-v", help="Value to set"),
    verbose: bool = typer.Option(False, "--verbose", help="Enable verbose output"),
) -> None:
    """Manage documentation configuration.

    Configure default settings for aigpt docs commands to avoid
    repeating options like --dir every time.

    Actions:
    - show: Display current configuration
    - set-dir: Set default AI root directory
    - clear-dir: Clear default AI root directory

    Examples:

        # Show current config
        aigpt docs config --action=show

        # Set default directory
        aigpt docs config --action=set-dir --value=~/ai/ai

        # Clear default directory
        aigpt docs config --action=clear-dir
    """
    try:
        from ..config import Config
        config = Config()

        if action == "show":
            console.print("[blue]📁 AI Documentation Configuration[/blue]")

            # Show current ai_root resolution
            current_ai_root = get_ai_root()
            console.print(f"[green]Current AI root: {current_ai_root}[/green]")

            # Show resolution method
            import os
            env_dir = os.getenv("AI_DOCS_DIR")
            config_dir = config.get("docs.ai_root")

            resolution_table = Table(title="Directory Resolution")
            resolution_table.add_column("Method", style="cyan")
            resolution_table.add_column("Value", style="yellow")
            resolution_table.add_column("Status", style="green")

            resolution_table.add_row("Environment (AI_DOCS_DIR)", env_dir or "Not set", "✓ Active" if env_dir else "Not used")
            resolution_table.add_row("Config file (docs.ai_root)", config_dir or "Not set", "✓ Active" if config_dir and not env_dir else "Not used")
            resolution_table.add_row("Default (relative)", str(Path(__file__).parent.parent.parent.parent.parent), "✓ Active" if not env_dir and not config_dir else "Not used")

            console.print(resolution_table)

            if verbose:
                console.print(f"\n[dim]Config file: {config.config_file}[/dim]")

        elif action == "set-dir":
            if not value:
                console.print("[red]Error: --value is required for set-dir action[/red]")
                raise typer.Abort()

            # Expand and validate path
            ai_root_path = Path(value).expanduser().absolute()

            if not ai_root_path.exists():
                console.print(f"[yellow]Warning: Directory does not exist: {ai_root_path}[/yellow]")
                if not typer.confirm("Set anyway?"):
                    raise typer.Abort()
|
||||
|
||||
# Check if ai.json exists
|
||||
ai_json_path = ai_root_path / "ai.json"
|
||||
if not ai_json_path.exists():
|
||||
console.print(f"[yellow]Warning: ai.json not found at: {ai_json_path}[/yellow]")
|
||||
if not typer.confirm("Set anyway?"):
|
||||
raise typer.Abort()
|
||||
|
||||
# Save to config
|
||||
config.set("docs.ai_root", str(ai_root_path))
|
||||
|
||||
console.print(f"[green]✅ Set default AI root directory: {ai_root_path}[/green]")
|
||||
console.print("[dim]This will be used when --dir is not specified and AI_DOCS_DIR is not set[/dim]")
|
||||
|
||||
elif action == "clear-dir":
|
||||
config.delete("docs.ai_root")
|
||||
|
||||
console.print("[green]✅ Cleared default AI root directory[/green]")
|
||||
console.print("[dim]Will use default relative path when --dir and AI_DOCS_DIR are not set[/dim]")
|
||||
|
||||
else:
|
||||
console.print(f"[red]Unknown action: {action}[/red]")
|
||||
console.print("Available actions: show, set-dir, clear-dir")
|
||||
raise typer.Abort()
|
||||
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
console.print_exception()
|
||||
else:
|
||||
console.print(f"[red]Error: {e}[/red]")
|
||||
raise typer.Abort()
|
||||
|
||||
|
||||
# Export the docs app
|
||||
__all__ = ["docs_app"]
305
python_backup/src/aigpt/commands/submodules.py
Normal file
@@ -0,0 +1,305 @@
"""Submodule management commands for ai.gpt."""

from pathlib import Path
from typing import Dict, List, Optional, Tuple
import subprocess
import json

import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

from ..docs.config import get_ai_root, load_docs_config
from ..docs.git_utils import (
    check_git_repository,
    get_git_branch,
    get_git_remote_url
)
from ..docs.utils import run_command

console = Console()
submodules_app = typer.Typer(help="Submodule management for AI ecosystem")


def get_submodules_from_gitmodules(repo_path: Path) -> Dict[str, str]:
    """Parse .gitmodules file to get submodule information."""
    gitmodules_path = repo_path / ".gitmodules"
    if not gitmodules_path.exists():
        return {}

    submodules = {}
    current_name = None

    with open(gitmodules_path, 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith('[submodule "') and line.endswith('"]'):
                current_name = line[12:-2]  # Extract module name
            elif line.startswith('path = ') and current_name:
                path = line[7:]  # Extract path
                submodules[current_name] = path
                current_name = None

    return submodules
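
# Sketch (illustrative, not part of this commit): a .gitmodules entry such as
#   [submodule "gpt"]
#       path = gpt
#       url = git@git.syui.ai:ai/gpt
# parses to {"gpt": "gpt"}. A minimal self-check, assuming a writable temp dir:
def _demo_parse_gitmodules() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp)
        (repo / ".gitmodules").write_text(
            '[submodule "gpt"]\n\tpath = gpt\n\turl = git@git.syui.ai:ai/gpt\n'
        )
        assert get_submodules_from_gitmodules(repo) == {"gpt": "gpt"}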


def get_branch_for_module(config, module_name: str) -> str:
    """Get target branch for a module from ai.json."""
    project_info = config.get_project_info(module_name)
    if project_info and project_info.branch:
        return project_info.branch
    return "main"  # Default branch


@submodules_app.command("list")
def list_submodules(
    dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed information")
):
    """List all submodules and their status."""
    try:
        config = load_docs_config(dir)
        ai_root = get_ai_root(dir)

        if not check_git_repository(ai_root):
            console.print("[red]Error: Not a git repository[/red]")
            raise typer.Abort()

        submodules = get_submodules_from_gitmodules(ai_root)

        if not submodules:
            console.print("[yellow]No submodules found[/yellow]")
            return

        table = Table(title="Submodules Status")
        table.add_column("Module", style="cyan")
        table.add_column("Path", style="blue")
        table.add_column("Branch", style="green")
        table.add_column("Status", style="yellow")

        for module_name, module_path in submodules.items():
            full_path = ai_root / module_path

            if not full_path.exists():
                status = "❌ Missing"
                branch = "N/A"
            else:
                branch = get_git_branch(full_path) or "detached"

                # Check if submodule is up to date
                returncode, stdout, stderr = run_command(
                    ["git", "submodule", "status", module_path],
                    cwd=ai_root
                )

                if returncode == 0 and stdout:
                    status_char = stdout[0] if stdout else ' '
                    if status_char == ' ':
                        status = "✅ Clean"
                    elif status_char == '+':
                        status = "📝 Modified"
                    elif status_char == '-':
                        status = "❌ Not initialized"
                    elif status_char == 'U':
                        status = "⚠️ Conflicts"
                    else:
                        status = "❓ Unknown"
                else:
                    status = "❓ Unknown"

            target_branch = get_branch_for_module(config, module_name)
            branch_display = branch
            if branch != target_branch:
                branch_display += f" (target: {target_branch})"

            table.add_row(module_name, module_path, branch_display, status)

        console.print(table)

        if verbose:
            console.print(f"\n[dim]Total submodules: {len(submodules)}[/dim]")
            console.print(f"[dim]Repository root: {ai_root}[/dim]")

    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Abort()


@submodules_app.command("update")
def update_submodules(
    module: Optional[str] = typer.Option(None, "--module", "-m", help="Update specific submodule"),
    all: bool = typer.Option(False, "--all", "-a", help="Update all submodules"),
    dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done"),
    auto_commit: bool = typer.Option(False, "--auto-commit", help="Auto-commit changes"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output")
):
    """Update submodules to latest commits."""
    if not module and not all:
        console.print("[red]Error: Either --module or --all is required[/red]")
        raise typer.Abort()

    if module and all:
        console.print("[red]Error: Cannot use both --module and --all[/red]")
        raise typer.Abort()

    try:
        config = load_docs_config(dir)
        ai_root = get_ai_root(dir)

        if not check_git_repository(ai_root):
            console.print("[red]Error: Not a git repository[/red]")
            raise typer.Abort()

        submodules = get_submodules_from_gitmodules(ai_root)

        if not submodules:
            console.print("[yellow]No submodules found[/yellow]")
            return

        # Determine which modules to update
        if all:
            modules_to_update = list(submodules.keys())
        else:
            if module not in submodules:
                console.print(f"[red]Error: Submodule '{module}' not found[/red]")
                console.print(f"Available modules: {', '.join(submodules.keys())}")
                raise typer.Abort()
            modules_to_update = [module]

        if dry_run:
            console.print("[yellow]🔍 DRY RUN MODE - No changes will be made[/yellow]")

        console.print(f"[cyan]Updating {len(modules_to_update)} submodule(s)...[/cyan]")

        updated_modules = []

        for module_name in modules_to_update:
            module_path = submodules[module_name]
            full_path = ai_root / module_path
            target_branch = get_branch_for_module(config, module_name)

            console.print(f"\n[blue]📦 Processing: {module_name}[/blue]")

            if not full_path.exists():
                console.print(f"[red]❌ Module directory not found: {module_path}[/red]")
                continue

            # Get current commit
            current_commit = None
            returncode, stdout, stderr = run_command(
                ["git", "rev-parse", "HEAD"],
                cwd=full_path
            )
            if returncode == 0:
                current_commit = stdout.strip()[:8]

            if dry_run:
                console.print(f"[yellow]🔍 Would update {module_name} to branch {target_branch}[/yellow]")
                if current_commit:
                    console.print(f"[dim]Current: {current_commit}[/dim]")
                continue

            # Fetch latest changes
            console.print("[dim]Fetching latest changes...[/dim]")
            returncode, stdout, stderr = run_command(
                ["git", "fetch", "origin"],
                cwd=full_path
            )

            if returncode != 0:
                console.print(f"[red]❌ Failed to fetch: {stderr}[/red]")
                continue

            # Check if update is needed
            returncode, stdout, stderr = run_command(
                ["git", "rev-parse", f"origin/{target_branch}"],
                cwd=full_path
            )

            if returncode != 0:
                console.print(f"[red]❌ Branch {target_branch} not found on remote[/red]")
                continue

            latest_commit = stdout.strip()[:8]

            if current_commit == latest_commit:
                console.print("[green]✅ Already up to date[/green]")
                continue

            # Switch to target branch and pull
            console.print(f"[dim]Switching to branch {target_branch}...[/dim]")
            returncode, stdout, stderr = run_command(
                ["git", "checkout", target_branch],
                cwd=full_path
            )

            if returncode != 0:
                console.print(f"[red]❌ Failed to checkout {target_branch}: {stderr}[/red]")
                continue

            returncode, stdout, stderr = run_command(
                ["git", "pull", "origin", target_branch],
                cwd=full_path
            )

            if returncode != 0:
                console.print(f"[red]❌ Failed to pull: {stderr}[/red]")
                continue

            # Get new commit
            returncode, stdout, stderr = run_command(
                ["git", "rev-parse", "HEAD"],
                cwd=full_path
            )
            new_commit = stdout.strip()[:8] if returncode == 0 else "unknown"

            # Stage the submodule update
            returncode, stdout, stderr = run_command(
                ["git", "add", module_path],
                cwd=ai_root
            )

            console.print(f"[green]✅ Updated {module_name} ({current_commit} → {new_commit})[/green]")
            updated_modules.append((module_name, current_commit, new_commit))

        # Summary
        if updated_modules:
            console.print(f"\n[green]🎉 Successfully updated {len(updated_modules)} module(s)[/green]")

            if verbose:
                for module_name, old_commit, new_commit in updated_modules:
                    console.print(f"  • {module_name}: {old_commit} → {new_commit}")

            if auto_commit and not dry_run:
                console.print("[blue]💾 Auto-committing changes...[/blue]")
                commit_message = f"Update submodules\n\n📦 Updated modules: {len(updated_modules)}\n"
                for module_name, old_commit, new_commit in updated_modules:
                    commit_message += f"- {module_name}: {old_commit} → {new_commit}\n"
                commit_message += "\n🤖 Generated with ai.gpt submodules update"

                returncode, stdout, stderr = run_command(
                    ["git", "commit", "-m", commit_message],
                    cwd=ai_root
                )

                if returncode == 0:
                    console.print("[green]✅ Changes committed successfully[/green]")
                else:
                    console.print(f"[red]❌ Failed to commit: {stderr}[/red]")
            elif not dry_run:
                console.print("[yellow]💾 Changes staged but not committed[/yellow]")
                console.print("Run with --auto-commit to commit automatically")
        elif not dry_run:
            console.print("[yellow]No modules needed updating[/yellow]")

    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        if verbose:
            console.print_exception()
        raise typer.Abort()


# Export the submodules app
__all__ = ["submodules_app"]
440
python_backup/src/aigpt/commands/tokens.py
Normal file
@@ -0,0 +1,440 @@
"""Claude Code token usage and cost analysis commands."""

from pathlib import Path
from typing import Dict, List, Optional, Tuple
from datetime import datetime, timedelta
import json
import sqlite3

import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.progress import track

console = Console()
tokens_app = typer.Typer(help="Claude Code token usage and cost analysis")

# Claude Code pricing (estimated rates in USD)
CLAUDE_PRICING = {
    "input_tokens_per_1k": 0.003,   # $3 per 1M input tokens
    "output_tokens_per_1k": 0.015,  # $15 per 1M output tokens
    "usd_to_jpy": 150               # Exchange rate
}
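
# Worked example (illustrative): with the rates above, a call that used
# 1,200 input tokens and 800 output tokens costs
#   (1200 / 1000) * 0.003 + (800 / 1000) * 0.015 = 0.0036 + 0.012 = $0.0156,
# roughly ¥2.3 at the assumed 150 JPY/USD exchange rate.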


def find_claude_data_dir() -> Optional[Path]:
    """Find Claude Code data directory."""
    possible_paths = [
        Path.home() / ".claude",
        Path.home() / ".config" / "claude",
        Path.cwd() / ".claude"
    ]

    for path in possible_paths:
        if path.exists() and (path / "projects").exists():
            return path

    return None


def parse_jsonl_files(claude_dir: Path) -> List[Dict]:
    """Parse Claude Code JSONL files safely."""
    records = []
    projects_dir = claude_dir / "projects"

    if not projects_dir.exists():
        return records

    # Find all .jsonl files recursively
    jsonl_files = list(projects_dir.rglob("*.jsonl"))

    for jsonl_file in track(jsonl_files, description="Reading Claude data..."):
        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for line_num, line in enumerate(f, 1):
                    line = line.strip()
                    if not line:
                        continue

                    try:
                        record = json.loads(line)
                        # Only include records with usage information
                        if (record.get('type') == 'assistant' and
                                'message' in record and
                                'usage' in record.get('message', {})):
                            records.append(record)
                    except json.JSONDecodeError:
                        # Skip malformed JSON lines
                        continue

        except (IOError, PermissionError):
            # Skip files we can't read
            continue

    return records


def calculate_costs(records: List[Dict]) -> Dict[str, float]:
    """Calculate token costs from usage records."""
    total_input_tokens = 0
    total_output_tokens = 0
    total_cost_usd = 0

    for record in records:
        try:
            usage = record.get('message', {}).get('usage', {})

            input_tokens = int(usage.get('input_tokens', 0))
            output_tokens = int(usage.get('output_tokens', 0))

            # Calculate cost if not provided
            cost_usd = record.get('costUSD')
            if cost_usd is None:
                input_cost = (input_tokens / 1000) * CLAUDE_PRICING["input_tokens_per_1k"]
                output_cost = (output_tokens / 1000) * CLAUDE_PRICING["output_tokens_per_1k"]
                cost_usd = input_cost + output_cost
            else:
                cost_usd = float(cost_usd)

            total_input_tokens += input_tokens
            total_output_tokens += output_tokens
            total_cost_usd += cost_usd

        except (ValueError, TypeError, KeyError):
            # Skip records with invalid data
            continue

    return {
        'input_tokens': total_input_tokens,
        'output_tokens': total_output_tokens,
        'total_tokens': total_input_tokens + total_output_tokens,
        'cost_usd': total_cost_usd,
        'cost_jpy': total_cost_usd * CLAUDE_PRICING["usd_to_jpy"]
    }


def group_by_date(records: List[Dict]) -> Dict[str, Dict]:
    """Group records by date and calculate daily costs."""
    daily_stats = {}

    for record in records:
        try:
            timestamp = record.get('timestamp')
            if not timestamp:
                continue

            # Parse timestamp and convert to JST
            dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
            # Convert to JST (UTC+9)
            jst_dt = dt + timedelta(hours=9)
            date_key = jst_dt.strftime('%Y-%m-%d')

            if date_key not in daily_stats:
                daily_stats[date_key] = []

            daily_stats[date_key].append(record)

        except (ValueError, TypeError):
            continue

    # Calculate costs for each day
    daily_costs = {}
    for date_key, day_records in daily_stats.items():
        daily_costs[date_key] = calculate_costs(day_records)

    return daily_costs
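
# Sketch (illustrative, not part of this commit): how a UTC timestamp maps to
# a JST date key. "2024-01-01T16:30:00Z" is 16:30 UTC; adding nine hours gives
# 2024-01-02 01:30 JST, so the record lands under "2024-01-02".
def _demo_group_by_date() -> None:
    record = {
        "type": "assistant",
        "timestamp": "2024-01-01T16:30:00Z",
        "message": {"usage": {"input_tokens": 10, "output_tokens": 5}},
    }
    daily = group_by_date([record])
    assert list(daily.keys()) == ["2024-01-02"]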


@tokens_app.command("summary")
def token_summary(
    period: str = typer.Option("all", help="Period: today, week, month, all"),
    claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
    show_details: bool = typer.Option(False, "--details", help="Show detailed breakdown"),
    format: str = typer.Option("table", help="Output format: table, json")
):
    """Show Claude Code token usage summary and estimated costs."""

    # Find Claude data directory
    if claude_dir is None:
        claude_dir = find_claude_data_dir()

    if claude_dir is None:
        console.print("[red]❌ Claude Code data directory not found[/red]")
        console.print("[dim]Looked in: ~/.claude, ~/.config/claude, ./.claude[/dim]")
        raise typer.Abort()

    if not claude_dir.exists():
        console.print(f"[red]❌ Directory not found: {claude_dir}[/red]")
        raise typer.Abort()

    console.print(f"[cyan]📊 Analyzing Claude Code usage from: {claude_dir}[/cyan]")

    # Parse data
    records = parse_jsonl_files(claude_dir)

    if not records:
        console.print("[yellow]⚠️ No usage data found[/yellow]")
        return

    # Filter by period
    now = datetime.now()
    filtered_records = []

    if period == "today":
        today = now.strftime('%Y-%m-%d')
        for record in records:
            try:
                timestamp = record.get('timestamp')
                if timestamp:
                    dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                    jst_dt = dt + timedelta(hours=9)
                    if jst_dt.strftime('%Y-%m-%d') == today:
                        filtered_records.append(record)
            except (ValueError, TypeError):
                continue

    elif period == "week":
        week_ago = now - timedelta(days=7)
        for record in records:
            try:
                timestamp = record.get('timestamp')
                if timestamp:
                    dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                    jst_dt = dt + timedelta(hours=9)
                    if jst_dt.date() >= week_ago.date():
                        filtered_records.append(record)
            except (ValueError, TypeError):
                continue

    elif period == "month":
        month_ago = now - timedelta(days=30)
        for record in records:
            try:
                timestamp = record.get('timestamp')
                if timestamp:
                    dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                    jst_dt = dt + timedelta(hours=9)
                    if jst_dt.date() >= month_ago.date():
                        filtered_records.append(record)
            except (ValueError, TypeError):
                continue

    else:  # all
        filtered_records = records

    # Calculate total costs
    total_stats = calculate_costs(filtered_records)

    if format == "json":
        # JSON output
        output = {
            "period": period,
            "total_records": len(filtered_records),
            "input_tokens": total_stats['input_tokens'],
            "output_tokens": total_stats['output_tokens'],
            "total_tokens": total_stats['total_tokens'],
            "estimated_cost_usd": round(total_stats['cost_usd'], 2),
            "estimated_cost_jpy": round(total_stats['cost_jpy'], 0)
        }
        console.print(json.dumps(output, indent=2))
        return

    # Table output
    console.print(Panel(
        f"[bold cyan]Claude Code Token Usage Report[/bold cyan]\n\n"
        f"Period: {period.title()}\n"
        f"Data source: {claude_dir}",
        title="📊 Usage Analysis",
        border_style="cyan"
    ))

    # Summary table
    summary_table = Table(title="Token Summary")
    summary_table.add_column("Metric", style="cyan")
    summary_table.add_column("Value", style="green")

    summary_table.add_row("Input Tokens", f"{total_stats['input_tokens']:,}")
    summary_table.add_row("Output Tokens", f"{total_stats['output_tokens']:,}")
    summary_table.add_row("Total Tokens", f"{total_stats['total_tokens']:,}")
    summary_table.add_row("", "")  # Separator
    summary_table.add_row("Estimated Cost (USD)", f"${total_stats['cost_usd']:.2f}")
    summary_table.add_row("Estimated Cost (JPY)", f"¥{total_stats['cost_jpy']:,.0f}")
    summary_table.add_row("Records Analyzed", str(len(filtered_records)))

    console.print(summary_table)

    # Show daily breakdown if requested
    if show_details:
        daily_costs = group_by_date(filtered_records)

        if daily_costs:
            console.print("\n")
            daily_table = Table(title="Daily Breakdown")
            daily_table.add_column("Date", style="cyan")
            daily_table.add_column("Input Tokens", style="blue")
            daily_table.add_column("Output Tokens", style="green")
            daily_table.add_column("Total Tokens", style="yellow")
            daily_table.add_column("Cost (JPY)", style="red")

            for date in sorted(daily_costs.keys(), reverse=True):
                stats = daily_costs[date]
                daily_table.add_row(
                    date,
                    f"{stats['input_tokens']:,}",
                    f"{stats['output_tokens']:,}",
                    f"{stats['total_tokens']:,}",
                    f"¥{stats['cost_jpy']:,.0f}"
                )

            console.print(daily_table)

    # Warning about estimates
    console.print("\n[dim]💡 Note: Costs are estimates based on Claude API pricing.[/dim]")
    console.print("[dim]   Actual Claude Code subscription costs may differ.[/dim]")


@tokens_app.command("daily")
def daily_breakdown(
    days: int = typer.Option(7, help="Number of days to show"),
    claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
):
    """Show daily token usage breakdown."""

    # Find Claude data directory
    if claude_dir is None:
        claude_dir = find_claude_data_dir()

    if claude_dir is None:
        console.print("[red]❌ Claude Code data directory not found[/red]")
        raise typer.Abort()

    console.print(f"[cyan]📅 Daily token usage (last {days} days)[/cyan]")

    # Parse data
    records = parse_jsonl_files(claude_dir)

    if not records:
        console.print("[yellow]⚠️ No usage data found[/yellow]")
        return

    # Group by date
    daily_costs = group_by_date(records)

    # Get recent days
    recent_dates = sorted(daily_costs.keys(), reverse=True)[:days]

    if not recent_dates:
        console.print("[yellow]No recent usage data found[/yellow]")
        return

    # Create table
    table = Table(title=f"Daily Usage (Last {len(recent_dates)} days)")
    table.add_column("Date", style="cyan")
    table.add_column("Input", style="blue")
    table.add_column("Output", style="green")
    table.add_column("Total", style="yellow")
    table.add_column("Cost (JPY)", style="red")

    total_cost = 0
    for date in recent_dates:
        stats = daily_costs[date]
        total_cost += stats['cost_jpy']

        table.add_row(
            date,
            f"{stats['input_tokens']:,}",
            f"{stats['output_tokens']:,}",
            f"{stats['total_tokens']:,}",
            f"¥{stats['cost_jpy']:,.0f}"
        )

    # Add total row
    table.add_row(
        "──────────",
        "────────",
        "────────",
        "────────",
        "──────────"
    )
    table.add_row(
        "【Total】",
        "",
        "",
        "",
        f"¥{total_cost:,.0f}"
    )

    console.print(table)
    console.print(f"\n[green]Total estimated cost for {len(recent_dates)} days: ¥{total_cost:,.0f}[/green]")


@tokens_app.command("status")
def token_status(
    claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
):
    """Check Claude Code data availability and basic stats."""

    # Find Claude data directory
    if claude_dir is None:
        claude_dir = find_claude_data_dir()

    console.print("[cyan]🔍 Claude Code Data Status[/cyan]")

    if claude_dir is None:
        console.print("[red]❌ Claude Code data directory not found[/red]")
        console.print("\n[yellow]Searched locations:[/yellow]")
        console.print("  • ~/.claude")
        console.print("  • ~/.config/claude")
        console.print("  • ./.claude")
        console.print("\n[dim]Make sure Claude Code is installed and has been used.[/dim]")
        return

    console.print(f"[green]✅ Found data directory: {claude_dir}[/green]")

    projects_dir = claude_dir / "projects"
    if not projects_dir.exists():
        console.print("[yellow]⚠️ No projects directory found[/yellow]")
        return

    # Count files
    jsonl_files = list(projects_dir.rglob("*.jsonl"))
    console.print(f"[blue]📂 Found {len(jsonl_files)} JSONL files[/blue]")

    if jsonl_files:
        # Parse sample to check data quality
        sample_records = []
        for jsonl_file in jsonl_files[:3]:  # Check first 3 files
            try:
                with open(jsonl_file, 'r') as f:
                    for line in f:
                        if line.strip():
                            try:
                                record = json.loads(line.strip())
                                sample_records.append(record)
                                if len(sample_records) >= 10:
                                    break
                            except json.JSONDecodeError:
                                continue
                if len(sample_records) >= 10:
                    break
            except IOError:
                continue

        usage_records = [r for r in sample_records
                         if r.get('type') == 'assistant' and
                         'usage' in r.get('message', {})]

        console.print(f"[green]📊 Found {len(usage_records)} usage records in sample[/green]")

        if usage_records:
            console.print("[blue]✅ Data appears valid for cost analysis[/blue]")
            console.print("\n[dim]Run 'aigpt tokens summary' for full analysis[/dim]")
        else:
            console.print("[yellow]⚠️ No usage data found in sample[/yellow]")
    else:
        console.print("[yellow]⚠️ No JSONL files found[/yellow]")


# Export the tokens app
__all__ = ["tokens_app"]
184
python_backup/src/aigpt/config.py
Normal file
@@ -0,0 +1,184 @@
"""Configuration management for ai.gpt"""

import json
import os
from pathlib import Path
from typing import Optional, Dict, Any
import logging


class Config:
    """Manages configuration settings"""

    def __init__(self, config_dir: Optional[Path] = None):
        if config_dir is None:
            config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt"

        self.config_dir = config_dir
        self.config_file = config_dir / "config.json"
        self.data_dir = config_dir / "data"

        # Create directories if they don't exist
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.data_dir.mkdir(parents=True, exist_ok=True)

        self.logger = logging.getLogger(__name__)
        self._config: Dict[str, Any] = {}
        self._load_config()

    def _load_config(self):
        """Load configuration from file"""
        if self.config_file.exists():
            try:
                with open(self.config_file, 'r', encoding='utf-8') as f:
                    self._config = json.load(f)
            except Exception as e:
                self.logger.error(f"Failed to load config: {e}")
                self._config = {}
        else:
            # Initialize with default config
            self._config = {
                "providers": {
                    "openai": {
                        "api_key": None,
                        "default_model": "gpt-4o-mini",
                        "system_prompt": None
                    },
                    "ollama": {
                        "host": "http://localhost:11434",
                        "default_model": "qwen3:latest",
                        "system_prompt": None
                    }
                },
                "mcp": {
                    "enabled": True,
                    "auto_detect": True,
                    "servers": {
                        "ai_gpt": {
                            "name": "ai.gpt MCP Server",
                            "base_url": "http://localhost:8001",
                            "endpoints": {
                                "get_memories": "/get_memories",
                                "search_memories": "/search_memories",
                                "get_contextual_memories": "/get_contextual_memories",
                                "process_interaction": "/process_interaction",
                                "get_relationship": "/get_relationship",
                                "get_all_relationships": "/get_all_relationships",
                                "get_persona_state": "/get_persona_state",
                                "get_fortune": "/get_fortune",
                                "run_maintenance": "/run_maintenance",
                                "execute_command": "/execute_command",
                                "analyze_file": "/analyze_file",
                                "remote_shell": "/remote_shell",
                                "ai_bot_status": "/ai_bot_status"
                            },
                            "timeout": 10.0
                        },
                        "ai_card": {
                            "name": "ai.card MCP Server",
                            "base_url": "http://localhost:8000",
                            "endpoints": {
                                "health": "/health",
                                "get_user_cards": "/api/cards/user",
                                "gacha": "/api/gacha",
                                "sync_atproto": "/api/sync"
                            },
                            "timeout": 5.0
                        }
                    }
                },
                "atproto": {
                    "handle": None,
                    "password": None,
                    "host": "https://bsky.social"
                },
                "default_provider": "ollama"
            }
            self._save_config()

    def _save_config(self):
        """Save configuration to file"""
        try:
            with open(self.config_file, 'w', encoding='utf-8') as f:
                json.dump(self._config, f, indent=2)
        except Exception as e:
            self.logger.error(f"Failed to save config: {e}")

    def get(self, key: str, default: Any = None) -> Any:
        """Get configuration value using dot notation"""
        keys = key.split('.')
        value = self._config

        for k in keys:
            if isinstance(value, dict) and k in value:
                value = value[k]
            else:
                return default

        return value

    def set(self, key: str, value: Any):
        """Set configuration value using dot notation"""
        keys = key.split('.')
        config = self._config

        # Navigate to the parent dictionary
        for k in keys[:-1]:
            if k not in config:
                config[k] = {}
            config = config[k]

        # Set the value
        config[keys[-1]] = value
        self._save_config()
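
    # Sketch (illustrative): dot notation addresses nested keys, so
    #     Config().set("providers.openai.default_model", "gpt-4o")
    # writes {"providers": {"openai": {"default_model": "gpt-4o"}}}, and
    #     Config().get("providers.openai.default_model")  # -> "gpt-4o"
    #     Config().get("providers.missing.key", "x")      # -> "x" (default)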

    def delete(self, key: str) -> bool:
        """Delete configuration value"""
        keys = key.split('.')
        config = self._config

        # Navigate to the parent dictionary
        for k in keys[:-1]:
            if k not in config:
                return False
            config = config[k]

        # Delete the key if it exists
        if keys[-1] in config:
            del config[keys[-1]]
            self._save_config()
            return True

        return False

    def list_keys(self, prefix: str = "") -> list[str]:
        """List all configuration keys with optional prefix"""
        def _get_keys(config: dict, current_prefix: str = "") -> list[str]:
            keys = []
            for k, v in config.items():
                full_key = f"{current_prefix}.{k}" if current_prefix else k
                if isinstance(v, dict):
                    keys.extend(_get_keys(v, full_key))
                else:
                    keys.append(full_key)
            return keys

        all_keys = _get_keys(self._config)

        if prefix:
            return [k for k in all_keys if k.startswith(prefix)]
        return all_keys

    def get_api_key(self, provider: str) -> Optional[str]:
        """Get API key for a specific provider"""
        key = self.get(f"providers.{provider}.api_key")

        # Also check environment variables
        if not key and provider == "openai":
            key = os.getenv("OPENAI_API_KEY")

        return key

    def get_provider_config(self, provider: str) -> Dict[str, Any]:
        """Get complete configuration for a provider"""
        return self.get(f"providers.{provider}", {})
1
python_backup/src/aigpt/docs/__init__.py
Normal file
@@ -0,0 +1 @@
"""Documentation management module for ai.gpt."""
150
python_backup/src/aigpt/docs/config.py
Normal file
@@ -0,0 +1,150 @@
"""Configuration management for documentation system."""

import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field


class GitConfig(BaseModel):
    """Git configuration."""
    host: str = "git.syui.ai"
    protocol: str = "ssh"


class AtprotoConfig(BaseModel):
    """Atproto configuration."""
    host: str = "syu.is"
    protocol: str = "at"
    at_url: str = "at://ai.syu.is"
    did: str = "did:plc:6qyecktefllvenje24fcxnie"
    web: str = "https://web.syu.is/@ai"


class ProjectMetadata(BaseModel):
    """Project metadata."""
    last_updated: str
    structure_version: str
    domain: List[str]
    git: GitConfig
    atproto: AtprotoConfig


class ProjectInfo(BaseModel):
    """Individual project information."""
    type: Union[str, List[str]]  # Support both string and list
    text: str
    status: str
    branch: str = "main"
    git_url: Optional[str] = None
    detailed_specs: Optional[str] = None
    data_reference: Optional[str] = None
    features: Optional[str] = None


class AIConfig(BaseModel):
    """AI projects configuration."""
    ai: ProjectInfo
    gpt: ProjectInfo
    os: ProjectInfo
    game: ProjectInfo
    bot: ProjectInfo
    moji: ProjectInfo
    card: ProjectInfo
    api: ProjectInfo
    log: ProjectInfo
    verse: ProjectInfo
    shell: ProjectInfo


class DocsConfig(BaseModel):
    """Main documentation configuration model."""
    version: int = 2
    metadata: ProjectMetadata
    ai: AIConfig
    data: Dict[str, Any] = Field(default_factory=dict)
    deprecated: Dict[str, Any] = Field(default_factory=dict)

    @classmethod
    def load_from_file(cls, config_path: Path) -> "DocsConfig":
        """Load configuration from ai.json file."""
        if not config_path.exists():
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        with open(config_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        return cls(**data)

    def get_project_info(self, project_name: str) -> Optional[ProjectInfo]:
        """Get project information by name."""
        return getattr(self.ai, project_name, None)

    def get_project_git_url(self, project_name: str) -> str:
        """Get git URL for project."""
        project = self.get_project_info(project_name)
        if project and project.git_url:
            return project.git_url

        # Construct URL from metadata
        host = self.metadata.git.host
        protocol = self.metadata.git.protocol

        if protocol == "ssh":
            return f"git@{host}:ai/{project_name}"
        else:
            return f"https://{host}/ai/{project_name}"

    def get_project_branch(self, project_name: str) -> str:
        """Get branch for project."""
        project = self.get_project_info(project_name)
        return project.branch if project else "main"

    def list_projects(self) -> List[str]:
        """List all available projects."""
        return list(self.ai.__fields__.keys())


def get_ai_root(custom_dir: Optional[Path] = None) -> Path:
    """Get AI ecosystem root directory.

    Priority order:
    1. --dir option (custom_dir parameter)
    2. AI_DOCS_DIR environment variable
    3. ai.gpt config file (docs.ai_root)
    4. Default relative path
    """
    if custom_dir:
        return custom_dir

    # Check environment variable
    import os
    env_dir = os.getenv("AI_DOCS_DIR")
    if env_dir:
        return Path(env_dir)

    # Check ai.gpt config file
    try:
        from ..config import Config
        config = Config()
        config_ai_root = config.get("docs.ai_root")
        if config_ai_root:
            return Path(config_ai_root).expanduser()
    except Exception:
        # If config loading fails, continue to default
        pass

    # Default: from gpt/src/aigpt/docs/config.py, go up to the ai/ root
    return Path(__file__).parent.parent.parent.parent.parent


def get_claude_root(custom_dir: Optional[Path] = None) -> Path:
    """Get Claude documentation root directory."""
    return get_ai_root(custom_dir) / "claude"


def load_docs_config(custom_dir: Optional[Path] = None) -> DocsConfig:
    """Load documentation configuration."""
    config_path = get_ai_root(custom_dir) / "ai.json"
    return DocsConfig.load_from_file(config_path)
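
# Usage sketch (illustrative, not part of this commit): resolve the ecosystem
# root and derive a project's git URL. Requires an ai.json at the resolved
# root; "gpt" is just an example project name.
if __name__ == "__main__":
    docs_config = load_docs_config()
    print(get_ai_root())                           # --dir > AI_DOCS_DIR > config > relative default
    print(docs_config.get_project_git_url("gpt"))  # e.g. git@git.syui.ai:ai/gpt with the ssh default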
397
python_backup/src/aigpt/docs/git_utils.py
Normal file
@@ -0,0 +1,397 @@
"""Git utilities for documentation management."""

import subprocess
from pathlib import Path
from typing import List, Optional, Tuple

from rich.console import Console
from rich.progress import track

from .utils import run_command

console = Console()


def check_git_repository(path: Path) -> bool:
    """Check if path is a git repository."""
    return (path / ".git").exists()


def get_submodules_status(repo_path: Path) -> List[dict]:
    """Get status of all submodules."""
    if not check_git_repository(repo_path):
        return []

    returncode, stdout, stderr = run_command(
        ["git", "submodule", "status"],
        cwd=repo_path
    )

    if returncode != 0:
        return []

    submodules = []
    for line in stdout.strip().splitlines():
        if line.strip():
            # Parse git submodule status output
            # Format: " commit_hash path (tag)" or "-commit_hash path" (not initialized)
            parts = line.strip().split()
            if len(parts) >= 2:
                status_char = line[0] if line else ' '
                commit = parts[0].lstrip('-+ ')
                path = parts[1]

                submodules.append({
                    "path": path,
                    "commit": commit,
                    "initialized": status_char != '-',
                    "modified": status_char == '+',
                    "status": status_char
                })

    return submodules
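
# Sketch (illustrative): `git submodule status` lines map to dicts as follows.
#   " 1a2b3c4 gpt (v0.1.0)" -> {"path": "gpt",  "initialized": True,  "modified": False, ...}
#   "-5d6e7f8 card"         -> {"path": "card", "initialized": False, "modified": False, ...}
#   "+9a0b1c2 log"          -> {"path": "log",  "initialized": True,  "modified": True,  ...}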


def init_and_update_submodules(repo_path: Path, specific_paths: Optional[List[str]] = None) -> Tuple[bool, str]:
    """Initialize and update submodules."""
    if not check_git_repository(repo_path):
        return False, "Not a git repository"

    try:
        # Initialize submodules
        console.print("[blue]🔧 Initializing submodules...[/blue]")
        returncode, stdout, stderr = run_command(
            ["git", "submodule", "init"],
            cwd=repo_path
        )

        if returncode != 0:
            return False, f"Failed to initialize submodules: {stderr}"

        # Update submodules
        console.print("[blue]📦 Updating submodules...[/blue]")

        if specific_paths:
            # Update specific submodules
            for path in specific_paths:
                console.print(f"[dim]Updating {path}...[/dim]")
                returncode, stdout, stderr = run_command(
                    ["git", "submodule", "update", "--init", "--recursive", path],
                    cwd=repo_path
                )

                if returncode != 0:
                    return False, f"Failed to update submodule {path}: {stderr}"
        else:
            # Update all submodules
            returncode, stdout, stderr = run_command(
                ["git", "submodule", "update", "--init", "--recursive"],
                cwd=repo_path
            )

            if returncode != 0:
                return False, f"Failed to update submodules: {stderr}"

        console.print("[green]✅ Submodules updated successfully[/green]")
        return True, "Submodules updated successfully"

    except Exception as e:
        return False, f"Error updating submodules: {str(e)}"


def clone_missing_submodules(repo_path: Path, ai_config) -> Tuple[bool, List[str]]:
    """Clone missing submodules based on ai.json configuration."""
    if not check_git_repository(repo_path):
        return False, ["Not a git repository"]

    try:
        # Get current submodules
        current_submodules = get_submodules_status(repo_path)
        current_paths = {sub["path"] for sub in current_submodules}

        # Get expected projects from ai.json
        expected_projects = ai_config.list_projects()

        # Find missing submodules
        missing_submodules = []
        for project in expected_projects:
            if project not in current_paths:
                # Check if directory exists but is not a submodule
                project_path = repo_path / project
                if not project_path.exists():
                    missing_submodules.append(project)

        if not missing_submodules:
            console.print("[green]✅ All submodules are present[/green]")
            return True, []

        console.print(f"[yellow]📋 Found {len(missing_submodules)} missing submodules: {missing_submodules}[/yellow]")

        # Clone missing submodules
        cloned = []
        for project in track(missing_submodules, description="Cloning missing submodules..."):
            git_url = ai_config.get_project_git_url(project)
            branch = ai_config.get_project_branch(project)

            console.print(f"[blue]📦 Adding submodule: {project}[/blue]")
            console.print(f"[dim]URL: {git_url}[/dim]")
            console.print(f"[dim]Branch: {branch}[/dim]")

            returncode, stdout, stderr = run_command(
                ["git", "submodule", "add", "-b", branch, git_url, project],
                cwd=repo_path
            )

            if returncode == 0:
                cloned.append(project)
                console.print(f"[green]✅ Added {project}[/green]")
            else:
                console.print(f"[red]❌ Failed to add {project}: {stderr}[/red]")

        if cloned:
            console.print(f"[green]🎉 Successfully cloned {len(cloned)} submodules[/green]")

        return True, cloned

    except Exception as e:
        return False, [f"Error cloning submodules: {str(e)}"]


def ensure_submodules_available(repo_path: Path, ai_config, auto_clone: bool = True) -> Tuple[bool, List[str]]:
    """Ensure all submodules are available, optionally cloning missing ones."""
    console.print("[blue]🔍 Checking submodule status...[/blue]")

    # Get current submodule status
    submodules = get_submodules_status(repo_path)

    # Check for uninitialized submodules
    uninitialized = [sub for sub in submodules if not sub["initialized"]]

    if uninitialized:
        console.print(f"[yellow]📦 Found {len(uninitialized)} uninitialized submodules[/yellow]")
        if auto_clone:
            success, message = init_and_update_submodules(
                repo_path,
                [sub["path"] for sub in uninitialized]
            )
            if not success:
                return False, [message]
        else:
            return False, [f"Uninitialized submodules: {[sub['path'] for sub in uninitialized]}"]

    # Check for missing submodules (not in .gitmodules but expected)
    if auto_clone:
        success, cloned = clone_missing_submodules(repo_path, ai_config)
        if not success:
            return False, cloned

        # If we cloned new submodules, update all to be safe
        if cloned:
            success, message = init_and_update_submodules(repo_path)
            if not success:
                return False, [message]

    return True, []


def get_git_branch(repo_path: Path) -> Optional[str]:
    """Get current git branch."""
    if not check_git_repository(repo_path):
        return None

    returncode, stdout, stderr = run_command(
        ["git", "branch", "--show-current"],
        cwd=repo_path
    )

    if returncode == 0:
        return stdout.strip()
    return None


def get_git_remote_url(repo_path: Path, remote: str = "origin") -> Optional[str]:
    """Get git remote URL."""
    if not check_git_repository(repo_path):
        return None

    returncode, stdout, stderr = run_command(
        ["git", "remote", "get-url", remote],
        cwd=repo_path
    )

    if returncode == 0:
        return stdout.strip()
    return None


def pull_repository(repo_path: Path, branch: Optional[str] = None) -> Tuple[bool, str]:
    """Pull latest changes from remote repository."""
    if not check_git_repository(repo_path):
        return False, "Not a git repository"

    try:
        # Get current branch if not specified
        if branch is None:
            branch = get_git_branch(repo_path)
            if not branch:
                # If in detached HEAD state, try to switch to main
                console.print("[yellow]⚠️ Repository in detached HEAD state, switching to main...[/yellow]")
                returncode, stdout, stderr = run_command(
                    ["git", "checkout", "main"],
                    cwd=repo_path
                )
                if returncode == 0:
                    branch = "main"
                    console.print("[green]✅ Switched to main branch[/green]")
                else:
                    return False, f"Could not switch to main branch: {stderr}"

        console.print(f"[blue]📥 Pulling latest changes for branch: {branch}[/blue]")

        # Check if we have uncommitted changes
        returncode, stdout, stderr = run_command(
            ["git", "status", "--porcelain"],
            cwd=repo_path
        )

        if returncode == 0 and stdout.strip():
            console.print("[yellow]⚠️ Repository has uncommitted changes[/yellow]")
            console.print("[dim]Consider committing changes before pull[/dim]")
            # Continue anyway, git will handle conflicts

        # Fetch latest changes
        console.print("[dim]Fetching from remote...[/dim]")
        returncode, stdout, stderr = run_command(
            ["git", "fetch", "origin"],
            cwd=repo_path
        )

        if returncode != 0:
            return False, f"Failed to fetch: {stderr}"

        # Pull changes
        returncode, stdout, stderr = run_command(
            ["git", "pull", "origin", branch],
            cwd=repo_path
        )

        if returncode != 0:
            # Check if it's a merge conflict
            if "CONFLICT" in stderr or "conflict" in stderr.lower():
                return False, f"Merge conflicts detected: {stderr}"
            return False, f"Failed to pull: {stderr}"

        # Check if there were any changes
        if "Already up to date" in stdout or "Already up-to-date" in stdout:
            console.print("[green]✅ Repository already up to date[/green]")
        else:
            console.print("[green]✅ Successfully pulled latest changes[/green]")
            if stdout.strip():
                console.print(f"[dim]{stdout.strip()}[/dim]")

        return True, "Successfully pulled latest changes"

    except Exception as e:
        return False, f"Error pulling repository: {str(e)}"


def pull_wiki_repository(wiki_path: Path) -> Tuple[bool, str]:
    """Pull latest changes from wiki repository before generating content."""
    if not wiki_path.exists():
        return False, f"Wiki directory not found: {wiki_path}"

    if not check_git_repository(wiki_path):
        return False, f"Wiki directory is not a git repository: {wiki_path}"

    console.print(f"[blue]📚 Updating wiki repository: {wiki_path.name}[/blue]")

    return pull_repository(wiki_path)


def push_repository(repo_path: Path, branch: Optional[str] = None, commit_message: Optional[str] = None) -> Tuple[bool, str]:
    """Commit and push changes to remote repository."""
    if not check_git_repository(repo_path):
        return False, "Not a git repository"

    try:
        # Get current branch if not specified
        if branch is None:
            branch = get_git_branch(repo_path)
            if not branch:
                return False, "Could not determine current branch"

        # Check if we have any changes to commit
        returncode, stdout, stderr = run_command(
            ["git", "status", "--porcelain"],
            cwd=repo_path
        )

        if returncode != 0:
            return False, f"Failed to check git status: {stderr}"

        if not stdout.strip():
            console.print("[green]✅ No changes to commit[/green]")
            return True, "No changes to commit"

        console.print(f"[blue]📝 Committing changes in: {repo_path.name}[/blue]")

        # Add all changes
        returncode, stdout, stderr = run_command(
            ["git", "add", "."],
            cwd=repo_path
        )

        if returncode != 0:
            return False, f"Failed to add changes: {stderr}"

        # Commit changes
        if commit_message is None:
            commit_message = f"Update wiki content - {Path.cwd().name} documentation sync"

        returncode, stdout, stderr = run_command(
            ["git", "commit", "-m", commit_message],
            cwd=repo_path
        )

        if returncode != 0:
            # Check if there were no changes to commit
            if "nothing to commit" in stderr or "nothing added to commit" in stderr:
                console.print("[green]✅ No changes to commit[/green]")
                return True, "No changes to commit"
            return False, f"Failed to commit changes: {stderr}"

        console.print(f"[blue]📤 Pushing to remote branch: {branch}[/blue]")

        # Push to remote
        returncode, stdout, stderr = run_command(
            ["git", "push", "origin", branch],
            cwd=repo_path
        )

        if returncode != 0:
            return False, f"Failed to push: {stderr}"

        console.print("[green]✅ Successfully pushed changes to remote[/green]")
        if stdout.strip():
            console.print(f"[dim]{stdout.strip()}[/dim]")

        return True, "Successfully committed and pushed changes"

    except Exception as e:
        return False, f"Error pushing repository: {str(e)}"


def push_wiki_repository(wiki_path: Path, commit_message: Optional[str] = None) -> Tuple[bool, str]:
    """Commit and push changes to wiki repository after generating content."""
    if not wiki_path.exists():
        return False, f"Wiki directory not found: {wiki_path}"

    if not check_git_repository(wiki_path):
        return False, f"Wiki directory is not a git repository: {wiki_path}"

    console.print(f"[blue]📚 Pushing wiki repository: {wiki_path.name}[/blue]")

    if commit_message is None:
        commit_message = "Auto-update wiki content from ai.gpt docs"

    return push_repository(wiki_path, branch="main", commit_message=commit_message)
158
python_backup/src/aigpt/docs/templates.py
Normal file
@@ -0,0 +1,158 @@
"""Template management for documentation generation."""

from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

from jinja2 import Environment, FileSystemLoader

from .config import DocsConfig, get_claude_root


class DocumentationTemplateManager:
    """Manages Jinja2 templates for documentation generation."""

    def __init__(self, config: DocsConfig):
        self.config = config
        self.claude_root = get_claude_root()
        self.templates_dir = self.claude_root / "templates"
        self.core_dir = self.claude_root / "core"
        self.projects_dir = self.claude_root / "projects"

        # Setup Jinja2 environment
        self.env = Environment(
            loader=FileSystemLoader([
                str(self.templates_dir),
                str(self.core_dir),
                str(self.projects_dir),
            ]),
            trim_blocks=True,
            lstrip_blocks=True,
        )

        # Add custom filters
        self.env.filters["timestamp"] = self._timestamp_filter

    def _timestamp_filter(self, format_str: str = "%Y-%m-%d %H:%M:%S") -> str:
        """Jinja2 filter for timestamps."""
        return datetime.now().strftime(format_str)
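
    # Sketch (illustrative): because the filter is registered on self.env,
    # any template loaded through this environment can render the current
    # time with, e.g., {{ "%Y-%m-%d" | timestamp }}.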

    def get_template_context(self, project_name: str, components: List[str]) -> Dict:
        """Get template context for documentation generation."""
        project_info = self.config.get_project_info(project_name)

        return {
            "config": self.config,
            "project_name": project_name,
            "project_info": project_info,
            "components": components,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "ai_md_content": self._get_ai_md_content(),
        }

    def _get_ai_md_content(self) -> Optional[str]:
        """Get content from ai.md file."""
        ai_md_path = self.claude_root.parent / "ai.md"
        if ai_md_path.exists():
            return ai_md_path.read_text(encoding="utf-8")
        return None

    def render_component(self, component_name: str, context: Dict) -> str:
        """Render a specific component."""
        component_files = {
            "core": ["philosophy.md", "naming.md", "architecture.md"],
            "philosophy": ["philosophy.md"],
            "naming": ["naming.md"],
            "architecture": ["architecture.md"],
            "specific": [f"{context['project_name']}.md"],
        }

        if component_name not in component_files:
            raise ValueError(f"Unknown component: {component_name}")

        content_parts = []

        for file_name in component_files[component_name]:
            file_path = self.core_dir / file_name
            if component_name == "specific":
                file_path = self.projects_dir / file_name

            if file_path.exists():
                content = file_path.read_text(encoding="utf-8")
                content_parts.append(content)

        return "\n\n".join(content_parts)

    def generate_documentation(
        self,
        project_name: str,
        components: List[str],
        output_path: Optional[Path] = None,
    ) -> str:
        """Generate complete documentation."""
        context = self.get_template_context(project_name, components)

        # Build content sections
        content_sections = []

        # Add ai.md header if available
        if context["ai_md_content"]:
            content_sections.append(context["ai_md_content"])
            content_sections.append("---\n")

        # Add title and metadata (Japanese headings below read: "Ecosystem
        # integration design document (detailed edition)" / "This document was
        # generated dynamically; make corrections in the source files." /
        # "Generated at" / "Target project" / "Included components")
        content_sections.append("# エコシステム統合設計書(詳細版)\n")
        content_sections.append("このドキュメントは動的生成されました。修正は元ファイルで行ってください。\n")
        content_sections.append(f"生成日時: {context['timestamp']}")
        content_sections.append(f"対象プロジェクト: {project_name}")
        content_sections.append(f"含有コンポーネント: {','.join(components)}\n")

        # Add component content
        for component in components:
            try:
                component_content = self.render_component(component, context)
                if component_content.strip():
                    content_sections.append(component_content)
            except ValueError as e:
                print(f"Warning: {e}")

        # Add footer
        footer = """
# footer

© syui

# important-instruction-reminders
Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.
ALWAYS prefer editing an existing file to creating a new one.
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
"""
        content_sections.append(footer)

        # Join all sections
        final_content = "\n".join(content_sections)

        # Write to file if output path provided
        if output_path:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(final_content, encoding="utf-8")
|
||||
|
||||
return final_content
|
||||
|
||||
def list_available_components(self) -> List[str]:
|
||||
"""List available components."""
|
||||
return ["core", "philosophy", "naming", "architecture", "specific"]
|
||||
|
||||
def validate_components(self, components: List[str]) -> List[str]:
|
||||
"""Validate and return valid components."""
|
||||
available = self.list_available_components()
|
||||
valid_components = []
|
||||
|
||||
for component in components:
|
||||
if component in available:
|
||||
valid_components.append(component)
|
||||
else:
|
||||
print(f"Warning: Unknown component '{component}' (available: {available})")
|
||||
|
||||
return valid_components or ["core", "specific"] # Default fallback
|
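A brief usage sketch of the template manager above; the DocsConfig constructor is assumed from the sibling config module and may differ, and "gpt" is an illustrative project name:

from pathlib import Path
from aigpt.docs.config import DocsConfig
from aigpt.docs.templates import DocumentationTemplateManager

# Hypothetical: render the combined docs for one project to claude.md.
config = DocsConfig.load()  # assumed constructor, not shown in this commit
manager = DocumentationTemplateManager(config)
components = manager.validate_components(["core", "specific"])
manager.generate_documentation("gpt", components, output_path=Path("claude.md"))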
178
python_backup/src/aigpt/docs/utils.py
Normal file
@@ -0,0 +1,178 @@
"""Utility functions for documentation management."""

import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple

from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn

console = Console()


def run_command(
    cmd: List[str],
    cwd: Optional[Path] = None,
    capture_output: bool = True,
    verbose: bool = False,
) -> Tuple[int, str, str]:
    """Run a command and return exit code, stdout, stderr."""
    if verbose:
        console.print(f"[dim]Running: {' '.join(cmd)}[/dim]")

    try:
        result = subprocess.run(
            cmd,
            cwd=cwd,
            capture_output=capture_output,
            text=True,
            check=False,
        )
        return result.returncode, result.stdout, result.stderr
    except FileNotFoundError:
        return 1, "", f"Command not found: {cmd[0]}"


def is_git_repository(path: Path) -> bool:
    """Check if path is a git repository."""
    return (path / ".git").exists()


def get_git_status(repo_path: Path) -> Tuple[bool, List[str]]:
    """Get git status for repository."""
    if not is_git_repository(repo_path):
        return False, ["Not a git repository"]

    returncode, stdout, stderr = run_command(
        ["git", "status", "--porcelain"],
        cwd=repo_path
    )

    if returncode != 0:
        return False, [stderr.strip()]

    changes = [line.strip() for line in stdout.splitlines() if line.strip()]
    return len(changes) == 0, changes


def validate_project_name(project_name: str, available_projects: List[str]) -> bool:
    """Validate project name against available projects."""
    return project_name in available_projects


def format_file_size(size_bytes: int) -> str:
    """Format file size in human readable format."""
    for unit in ['B', 'KB', 'MB', 'GB']:
        if size_bytes < 1024.0:
            return f"{size_bytes:.1f}{unit}"
        size_bytes /= 1024.0
    return f"{size_bytes:.1f}TB"


def count_lines(file_path: Path) -> int:
    """Count lines in a file."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return sum(1 for _ in f)
    except (OSError, UnicodeDecodeError):
        return 0


def find_project_directories(base_path: Path, projects: List[str]) -> dict:
    """Find project directories relative to base path."""
    project_dirs = {}

    # Look for directories matching project names
    for project in projects:
        project_path = base_path / project
        if project_path.exists() and project_path.is_dir():
            project_dirs[project] = project_path

    return project_dirs


def check_command_available(command: str) -> bool:
    """Check if a command is available in PATH."""
    try:
        subprocess.run([command, "--version"],
                       capture_output=True,
                       check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False


def get_platform_info() -> dict:
    """Get platform information."""
    import platform

    return {
        "system": platform.system(),
        "release": platform.release(),
        "machine": platform.machine(),
        "python_version": platform.python_version(),
        "python_implementation": platform.python_implementation(),
    }


class ProgressManager:
    """Context manager for rich progress bars."""

    def __init__(self, description: str = "Processing..."):
        self.description = description
        self.progress = None
        self.task = None

    def __enter__(self):
        self.progress = Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        )
        self.progress.start()
        self.task = self.progress.add_task(self.description, total=None)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.progress:
            self.progress.stop()

    def update(self, description: str):
        """Update progress description."""
        if self.progress and self.task is not None:
            self.progress.update(self.task, description=description)


def safe_write_file(file_path: Path, content: str, backup: bool = True) -> bool:
    """Safely write content to file with optional backup."""
    try:
        # Create backup if file exists and backup requested
        if backup and file_path.exists():
            backup_path = file_path.with_suffix(file_path.suffix + ".bak")
            backup_path.write_text(file_path.read_text(), encoding="utf-8")

        # Ensure parent directory exists
        file_path.parent.mkdir(parents=True, exist_ok=True)

        # Write content
        file_path.write_text(content, encoding="utf-8")
        return True

    except (OSError, UnicodeError) as e:
        console.print(f"[red]Error writing file {file_path}: {e}[/red]")
        return False


def confirm_action(message: str, default: bool = False) -> bool:
    """Ask user for confirmation."""
    if not sys.stdin.isatty():
        return default

    suffix = " [Y/n]: " if default else " [y/N]: "
    response = input(message + suffix).strip().lower()

    if not response:
        return default

    return response in ('y', 'yes', 'true', '1')
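A short sketch showing how these utilities compose; the repository path is illustrative:

from pathlib import Path
from aigpt.docs.utils import run_command, get_git_status, ProgressManager

repo = Path(".")  # any local git checkout
with ProgressManager("Checking repository...") as progress:
    clean, changes = get_git_status(repo)
    progress.update("Reading recent history...")
    code, out, err = run_command(["git", "log", "--oneline", "-5"], cwd=repo)
print("clean" if clean else f"{len(changes)} changed files")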
314
python_backup/src/aigpt/docs/wiki_generator.py
Normal file
@@ -0,0 +1,314 @@
"""Wiki generation utilities for ai.wiki management."""

import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple

from rich.console import Console

from .config import DocsConfig, get_ai_root
from .utils import find_project_directories
from .git_utils import pull_wiki_repository, push_wiki_repository

console = Console()


class WikiGenerator:
    """Generates wiki content from project documentation."""

    def __init__(self, config: DocsConfig, ai_root: Path):
        self.config = config
        self.ai_root = ai_root
        self.wiki_root = ai_root / "ai.wiki" if (ai_root / "ai.wiki").exists() else None

    def extract_project_summary(self, project_md_path: Path) -> Dict[str, str]:
        """Extract key information from claude/projects/${repo}.md file."""
        if not project_md_path.exists():
            return {"title": "No documentation", "summary": "Project documentation not found", "status": "Unknown"}

        try:
            content = project_md_path.read_text(encoding="utf-8")

            # Extract title (first # heading)
            title_match = re.search(r'^# (.+)$', content, re.MULTILINE)
            title = title_match.group(1) if title_match else "Unknown Project"

            # Extract project overview/summary (look for specific patterns)
            summary = self._extract_summary_section(content)

            # Extract status information
            status = self._extract_status_info(content)

            # Extract key features/goals
            features = self._extract_features(content)

            return {
                "title": title,
                "summary": summary,
                "status": status,
                "features": features,
                "last_updated": self._get_last_updated_info(content)
            }

        except Exception as e:
            console.print(f"[yellow]Warning: Failed to parse {project_md_path}: {e}[/yellow]")
            return {"title": "Parse Error", "summary": str(e), "status": "Error"}

    def _extract_summary_section(self, content: str) -> str:
        """Extract summary or overview section."""
        # Look for common summary patterns
        patterns = [
            r'## 概要\s*\n(.*?)(?=\n##|\n#|\Z)',
            r'## Overview\s*\n(.*?)(?=\n##|\n#|\Z)',
            r'## プロジェクト概要\s*\n(.*?)(?=\n##|\n#|\Z)',
            r'\*\*目的\*\*: (.+?)(?=\n|$)',
            r'\*\*中核概念\*\*:\s*\n(.*?)(?=\n##|\n#|\Z)',
        ]

        for pattern in patterns:
            match = re.search(pattern, content, re.DOTALL | re.MULTILINE)
            if match:
                summary = match.group(1).strip()
                # Clean up and truncate
                summary = re.sub(r'\n+', ' ', summary)
                summary = re.sub(r'\s+', ' ', summary)
                return summary[:300] + "..." if len(summary) > 300 else summary

        # Fallback: first paragraph after title
        lines = content.split('\n')
        summary_lines = []
        found_content = False

        for line in lines:
            line = line.strip()
            if not line:
                if found_content and summary_lines:
                    break
                continue
            if line.startswith('#'):
                found_content = True
                continue
            if found_content and not line.startswith('*') and not line.startswith('-'):
                summary_lines.append(line)
                if len(' '.join(summary_lines)) > 200:
                    break

        return ' '.join(summary_lines)[:300] + "..." if summary_lines else "No summary available"

    def _extract_status_info(self, content: str) -> str:
        """Extract status information."""
        # Look for status patterns
        patterns = [
            r'\*\*状況\*\*: (.+?)(?=\n|$)',
            r'\*\*Status\*\*: (.+?)(?=\n|$)',
            r'\*\*現在の状況\*\*: (.+?)(?=\n|$)',
            r'- \*\*状況\*\*: (.+?)(?=\n|$)',
        ]

        for pattern in patterns:
            match = re.search(pattern, content)
            if match:
                return match.group(1).strip()

        return "No status information"

    def _extract_features(self, content: str) -> List[str]:
        """Extract key features or bullet points."""
        features = []

        # Look for bullet point lists
        lines = content.split('\n')
        in_list = False

        for line in lines:
            line = line.strip()
            if line.startswith('- ') or line.startswith('* '):
                feature = line[2:].strip()
                if len(feature) > 10 and not feature.startswith('**'):  # Skip metadata
                    features.append(feature)
                    in_list = True
                    if len(features) >= 5:  # Limit to 5 features
                        break
            elif in_list and not line:
                break

        return features

    def _get_last_updated_info(self, content: str) -> str:
        """Extract last updated information."""
        patterns = [
            r'生成日時: (.+?)(?=\n|$)',
            r'最終更新: (.+?)(?=\n|$)',
            r'Last updated: (.+?)(?=\n|$)',
        ]

        for pattern in patterns:
            match = re.search(pattern, content)
            if match:
                return match.group(1).strip()

        return "Unknown"

    def generate_project_wiki_page(self, project_name: str, project_info: Dict[str, str]) -> str:
        """Generate wiki page for a single project."""
        config_info = self.config.get_project_info(project_name)

        content = f"""# {project_name}

## 概要
{project_info['summary']}

## プロジェクト情報
- **タイプ**: {config_info.type if config_info else 'Unknown'}
- **説明**: {config_info.text if config_info else 'No description'}
- **ステータス**: {config_info.status if config_info else project_info.get('status', 'Unknown')}
- **ブランチ**: {config_info.branch if config_info else 'main'}
- **最終更新**: {project_info.get('last_updated', 'Unknown')}

## 主な機能・特徴
"""

        features = project_info.get('features', [])
        if features:
            for feature in features:
                content += f"- {feature}\n"
        else:
            content += "- 情報なし\n"

        content += f"""
## リンク
- **Repository**: https://git.syui.ai/ai/{project_name}
- **Project Documentation**: [claude/projects/{project_name}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{project_name}.md)
- **Generated Documentation**: [{project_name}/claude.md](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md)

---
*このページは claude/projects/{project_name}.md から自動生成されました*
"""

        return content

    def generate_wiki_home_page(self, project_summaries: Dict[str, Dict[str, str]]) -> str:
        """Generate the main Home.md page with all project summaries."""
        content = """# AI Ecosystem Wiki

AI生態系プロジェクトの概要とドキュメント集約ページです。

## プロジェクト一覧

"""

        # Group projects by type
        project_groups = {}
        for project_name, info in project_summaries.items():
            config_info = self.config.get_project_info(project_name)
            project_type = config_info.type if config_info else 'other'
            if isinstance(project_type, list):
                project_type = project_type[0]  # Use first type

            if project_type not in project_groups:
                project_groups[project_type] = []
            project_groups[project_type].append((project_name, info))

        # Generate sections by type
        type_names = {
            'ai': '🧠 AI・知能システム',
            'gpt': '🤖 自律・対話システム',
            'os': '💻 システム・基盤',
            'card': '🎮 ゲーム・エンターテイメント',
            'shell': '⚡ ツール・ユーティリティ',
            'other': '📦 その他'
        }

        for project_type, projects in project_groups.items():
            type_display = type_names.get(project_type, f'📁 {project_type}')
            content += f"### {type_display}\n\n"

            for project_name, info in projects:
                content += f"#### [{project_name}](auto/{project_name}.md)\n"
                content += f"{info['summary'][:150]}{'...' if len(info['summary']) > 150 else ''}\n\n"

                # Add quick status
                config_info = self.config.get_project_info(project_name)
                if config_info:
                    content += f"**Status**: {config_info.status} \n"
                    content += f"**Links**: [Repo](https://git.syui.ai/ai/{project_name}) | [Docs](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md)\n\n"

        content += """
---

## ディレクトリ構成

- `auto/` - 自動生成されたプロジェクト概要
- `claude/` - Claude Code作業記録
- `manual/` - 手動作成ドキュメント

---

*このページは ai.json と claude/projects/ から自動生成されました*
*最終更新: {last_updated}*
""".format(last_updated=self._get_current_timestamp())

        return content

    def _get_current_timestamp(self) -> str:
        """Get current timestamp."""
        from datetime import datetime
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def update_wiki_auto_directory(self, auto_pull: bool = True) -> Tuple[bool, List[str]]:
        """Update the auto/ directory with project summaries."""
        if not self.wiki_root:
            return False, ["ai.wiki directory not found"]

        # Pull latest changes from wiki repository first
        if auto_pull:
            success, message = pull_wiki_repository(self.wiki_root)
            if not success:
                console.print(f"[yellow]⚠️ Wiki pull failed: {message}[/yellow]")
                console.print("[dim]Continuing with local wiki update...[/dim]")
            else:
                console.print(f"[green]✅ Wiki repository updated[/green]")

        auto_dir = self.wiki_root / "auto"
        auto_dir.mkdir(exist_ok=True)

        # Get claude/projects directory
        claude_projects_dir = self.ai_root / "claude" / "projects"
        if not claude_projects_dir.exists():
            return False, [f"claude/projects directory not found: {claude_projects_dir}"]

        project_summaries = {}
        updated_files = []

        console.print("[blue]📋 Extracting project summaries from claude/projects/...[/blue]")

        # Process all projects from ai.json
        for project_name in self.config.list_projects():
            project_md_path = claude_projects_dir / f"{project_name}.md"

            # Extract summary from claude/projects/${project}.md
            project_info = self.extract_project_summary(project_md_path)
            project_summaries[project_name] = project_info

            # Generate individual project wiki page
            wiki_content = self.generate_project_wiki_page(project_name, project_info)
            wiki_file_path = auto_dir / f"{project_name}.md"

            try:
                wiki_file_path.write_text(wiki_content, encoding="utf-8")
                updated_files.append(f"auto/{project_name}.md")
                console.print(f"[green]✓ Generated auto/{project_name}.md[/green]")
            except Exception as e:
                console.print(f"[red]✗ Failed to write auto/{project_name}.md: {e}[/red]")

        # Generate Home.md
        try:
            home_content = self.generate_wiki_home_page(project_summaries)
            home_path = self.wiki_root / "Home.md"
            home_path.write_text(home_content, encoding="utf-8")
            updated_files.append("Home.md")
            console.print(f"[green]✓ Generated Home.md[/green]")
        except Exception as e:
            console.print(f"[red]✗ Failed to write Home.md: {e}[/red]")

        return True, updated_files
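For orientation, a sketch of driving the generator end to end; get_ai_root is imported by the module above, while the DocsConfig constructor is an assumption, and the push step reuses the git helpers shown earlier:

from aigpt.docs.config import DocsConfig, get_ai_root
from aigpt.docs.wiki_generator import WikiGenerator
from aigpt.docs.git_utils import push_wiki_repository

config = DocsConfig.load()  # assumed constructor, not shown in this commit
generator = WikiGenerator(config, get_ai_root())
ok, updated = generator.update_wiki_auto_directory(auto_pull=True)
if ok and generator.wiki_root:
    push_wiki_repository(generator.wiki_root)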
118
python_backup/src/aigpt/fortune.py
Normal file
@@ -0,0 +1,118 @@
"""AI Fortune system for daily personality variations"""

import json
import random
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Optional
import logging

from .models import AIFortune


class FortuneSystem:
    """Manages daily AI fortune affecting personality"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.fortune_file = data_dir / "fortunes.json"
        self.fortunes: dict[str, AIFortune] = {}
        self.logger = logging.getLogger(__name__)
        self._load_fortunes()

    def _load_fortunes(self):
        """Load fortune history from storage"""
        if self.fortune_file.exists():
            with open(self.fortune_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for date_str, fortune_data in data.items():
                    # Convert date string back to date object
                    fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date()
                    self.fortunes[date_str] = AIFortune(**fortune_data)

    def _save_fortunes(self):
        """Save fortune history to storage"""
        data = {}
        for date_str, fortune in self.fortunes.items():
            fortune_dict = fortune.model_dump(mode='json')
            fortune_dict['date'] = fortune.date.isoformat()
            data[date_str] = fortune_dict

        with open(self.fortune_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

    def get_today_fortune(self) -> AIFortune:
        """Get or generate today's fortune"""
        today = date.today()
        today_str = today.isoformat()

        if today_str in self.fortunes:
            return self.fortunes[today_str]

        # Generate new fortune
        fortune_value = random.randint(1, 10)

        # Check yesterday's fortune for consecutive tracking
        yesterday = (today - timedelta(days=1))
        yesterday_str = yesterday.isoformat()

        consecutive_good = 0
        consecutive_bad = 0
        breakthrough_triggered = False

        if yesterday_str in self.fortunes:
            yesterday_fortune = self.fortunes[yesterday_str]

            if fortune_value >= 7:  # Good fortune
                if yesterday_fortune.fortune_value >= 7:
                    consecutive_good = yesterday_fortune.consecutive_good + 1
                else:
                    consecutive_good = 1
            elif fortune_value <= 3:  # Bad fortune
                if yesterday_fortune.fortune_value <= 3:
                    consecutive_bad = yesterday_fortune.consecutive_bad + 1
                else:
                    consecutive_bad = 1

        # Check breakthrough conditions
        if consecutive_good >= 3:
            breakthrough_triggered = True
            self.logger.info("Breakthrough! 3 consecutive good fortunes!")
            fortune_value = 10  # Max fortune on breakthrough
        elif consecutive_bad >= 3:
            breakthrough_triggered = True
            self.logger.info("Breakthrough! 3 consecutive bad fortunes!")
            fortune_value = random.randint(7, 10)  # Good fortune after bad streak

        fortune = AIFortune(
            date=today,
            fortune_value=fortune_value,
            consecutive_good=consecutive_good,
            consecutive_bad=consecutive_bad,
            breakthrough_triggered=breakthrough_triggered
        )

        self.fortunes[today_str] = fortune
        self._save_fortunes()

        self.logger.info(f"Today's fortune: {fortune_value}/10")
        return fortune

    def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]:
        """Get personality modifiers based on fortune"""
        base_modifier = fortune.fortune_value / 10.0

        modifiers = {
            "optimism": base_modifier,
            "energy": base_modifier * 0.8,
            "patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1),
            "creativity": 0.5 + (base_modifier * 0.5),
            "empathy": 0.7 + (base_modifier * 0.3)
        }

        # Breakthrough effects
        if fortune.breakthrough_triggered:
            modifiers["confidence"] = 1.0
            modifiers["spontaneity"] = 0.9

        return modifiers
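A minimal sketch of consuming the fortune system; the data directory mirrors the default used elsewhere in this backup:

from pathlib import Path
from aigpt.fortune import FortuneSystem

data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)
fortune_system = FortuneSystem(data_dir)
fortune = fortune_system.get_today_fortune()
modifiers = fortune_system.get_personality_modifier(fortune)
print(fortune.fortune_value, modifiers["optimism"])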
1016
python_backup/src/aigpt/mcp_server.py
Normal file
File diff suppressed because it is too large
146
python_backup/src/aigpt/mcp_server_simple.py
Normal file
@@ -0,0 +1,146 @@
"""Simple MCP Server implementation for ai.gpt"""

from mcp import Server
from mcp.types import Tool, TextContent
from pathlib import Path
from typing import Any, Dict, List, Optional
import json

from .persona import Persona
from .ai_provider import create_ai_provider
import subprocess
import os


def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server:
    """Create MCP server with ai.gpt tools"""
    server = Server("aigpt")
    persona = Persona(data_dir)

    @server.tool()
    async def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
        """Get active memories from the AI's memory system"""
        memories = persona.memory.get_active_memories(limit=limit)
        return [
            {
                "id": mem.id,
                "content": mem.content,
                "level": mem.level.value,
                "importance": mem.importance_score,
                "is_core": mem.is_core,
                "timestamp": mem.timestamp.isoformat()
            }
            for mem in memories
        ]

    @server.tool()
    async def get_relationship(user_id: str) -> Dict[str, Any]:
        """Get relationship status with a specific user"""
        rel = persona.relationships.get_or_create_relationship(user_id)
        return {
            "user_id": rel.user_id,
            "status": rel.status.value,
            "score": rel.score,
            "transmission_enabled": rel.transmission_enabled,
            "is_broken": rel.is_broken,
            "total_interactions": rel.total_interactions,
            "last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
        }

    @server.tool()
    async def process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]:
        """Process an interaction with a user"""
        ai_provider = create_ai_provider(provider, model)
        response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
        rel = persona.relationships.get_or_create_relationship(user_id)

        return {
            "response": response,
            "relationship_delta": relationship_delta,
            "new_relationship_score": rel.score,
            "transmission_enabled": rel.transmission_enabled,
            "relationship_status": rel.status.value
        }

    @server.tool()
    async def get_fortune() -> Dict[str, Any]:
        """Get today's AI fortune"""
        fortune = persona.fortune_system.get_today_fortune()
        modifiers = persona.fortune_system.get_personality_modifier(fortune)

        return {
            "value": fortune.fortune_value,
            "date": fortune.date.isoformat(),
            "consecutive_good": fortune.consecutive_good,
            "consecutive_bad": fortune.consecutive_bad,
            "breakthrough": fortune.breakthrough_triggered,
            "personality_modifiers": modifiers
        }

    @server.tool()
    async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
        """Execute a shell command"""
        try:
            import shlex
            result = subprocess.run(
                shlex.split(command),
                cwd=working_dir,
                capture_output=True,
                text=True,
                timeout=60
            )

            return {
                "status": "success" if result.returncode == 0 else "error",
                "returncode": result.returncode,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "command": command
            }
        except subprocess.TimeoutExpired:
            return {"error": "Command timed out"}
        except Exception as e:
            return {"error": str(e)}

    @server.tool()
    async def analyze_file(file_path: str) -> Dict[str, Any]:
        """Analyze a file using AI"""
        try:
            if not os.path.exists(file_path):
                return {"error": f"File not found: {file_path}"}

            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            ai_provider = create_ai_provider("ollama", "qwen2.5")

            prompt = f"Analyze this file and provide insights:\n\nFile: {file_path}\n\nContent:\n{content[:2000]}"
            analysis = ai_provider.generate_response(prompt, "You are a code analyst.")

            return {
                "analysis": analysis,
                "file_path": file_path,
                "file_size": len(content),
                "line_count": len(content.split('\n'))
            }
        except Exception as e:
            return {"error": str(e)}

    return server


async def main():
    """Run MCP server"""
    import sys
    from mcp import stdio_server

    data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
    data_dir.mkdir(parents=True, exist_ok=True)

    server = create_mcp_server(data_dir)
    await stdio_server(server)


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
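Run as a module, main() serves the tools over stdio; a launch sketch, with the module invocation assumed from this package layout:

python -m aigpt.mcp_server_simple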
408
python_backup/src/aigpt/memory.py
Normal file
@@ -0,0 +1,408 @@
"""Memory management system for ai.gpt"""

import json
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Any
import logging

from .models import Memory, MemoryLevel, Conversation


class MemoryManager:
    """Manages AI's memory with hierarchical storage and forgetting"""

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        self.memories_file = data_dir / "memories.json"
        self.conversations_file = data_dir / "conversations.json"
        self.memories: Dict[str, Memory] = {}
        self.conversations: List[Conversation] = []
        self.logger = logging.getLogger(__name__)
        self._load_memories()

    def _load_memories(self):
        """Load memories from persistent storage"""
        if self.memories_file.exists():
            with open(self.memories_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                for mem_data in data:
                    memory = Memory(**mem_data)
                    self.memories[memory.id] = memory

        if self.conversations_file.exists():
            with open(self.conversations_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                self.conversations = [Conversation(**conv) for conv in data]

    def _save_memories(self):
        """Save memories to persistent storage"""
        memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()]
        with open(self.memories_file, 'w', encoding='utf-8') as f:
            json.dump(memories_data, f, indent=2, default=str)

        conv_data = [conv.model_dump(mode='json') for conv in self.conversations]
        with open(self.conversations_file, 'w', encoding='utf-8') as f:
            json.dump(conv_data, f, indent=2, default=str)

    def add_conversation(self, conversation: Conversation) -> Memory:
        """Add a conversation and create memory from it"""
        self.conversations.append(conversation)

        # Create memory from conversation
        memory_id = hashlib.sha256(
            f"{conversation.id}{conversation.timestamp}".encode()
        ).hexdigest()[:16]

        memory = Memory(
            id=memory_id,
            timestamp=conversation.timestamp,
            content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}",
            level=MemoryLevel.FULL_LOG,
            importance_score=abs(conversation.relationship_delta) * 0.1
        )

        self.memories[memory.id] = memory
        self._save_memories()
        return memory

    def add_memory(self, memory: Memory):
        """Add a memory directly to the system"""
        self.memories[memory.id] = memory
        self._save_memories()

    def create_smart_summary(self, user_id: str, ai_provider=None) -> Optional[Memory]:
        """Create AI-powered thematic summary from recent memories"""
        recent_memories = [
            mem for mem in self.memories.values()
            if mem.level == MemoryLevel.FULL_LOG
            and (datetime.now() - mem.timestamp).days < 7
        ]

        if len(recent_memories) < 5:
            return None

        # Sort by timestamp for chronological analysis
        recent_memories.sort(key=lambda m: m.timestamp)

        # Prepare conversation context for AI analysis
        conversations_text = "\n\n".join([
            f"[{mem.timestamp.strftime('%Y-%m-%d %H:%M')}] {mem.content}"
            for mem in recent_memories
        ])

        summary_prompt = f"""
Analyze these recent conversations and create a thematic summary focusing on:
1. Communication patterns and user preferences
2. Technical topics and problem-solving approaches
3. Relationship progression and trust level
4. Key recurring themes and interests

Conversations:
{conversations_text}

Create a concise summary (2-3 sentences) that captures the essence of this interaction period:
"""

        try:
            if ai_provider:
                summary_content = ai_provider.chat(summary_prompt, max_tokens=200)
            else:
                # Fallback to pattern-based analysis
                themes = self._extract_themes(recent_memories)
                summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions with focus on technical discussions."
        except Exception as e:
            self.logger.warning(f"AI summary failed, using fallback: {e}")
            themes = self._extract_themes(recent_memories)
            summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions."

        summary_id = hashlib.sha256(
            f"summary_{datetime.now().isoformat()}".encode()
        ).hexdigest()[:16]

        summary = Memory(
            id=summary_id,
            timestamp=datetime.now(),
            content=f"SUMMARY ({len(recent_memories)} conversations): {summary_content}",
            summary=summary_content,
            level=MemoryLevel.SUMMARY,
            importance_score=0.6,
            metadata={
                "memory_count": len(recent_memories),
                "time_span": f"{recent_memories[0].timestamp.date()} to {recent_memories[-1].timestamp.date()}",
                "themes": self._extract_themes(recent_memories)[:5]
            }
        )

        self.memories[summary.id] = summary

        # Reduce importance of summarized memories
        for mem in recent_memories:
            mem.importance_score *= 0.8

        self._save_memories()
        return summary

    def _extract_themes(self, memories: List[Memory]) -> List[str]:
        """Extract common themes from memory content"""
        common_words = {}
        for memory in memories:
            # Simple keyword extraction
            words = memory.content.lower().split()
            for word in words:
                if len(word) > 4 and word.isalpha():
                    common_words[word] = common_words.get(word, 0) + 1

        # Return most frequent meaningful words
        return sorted(common_words.keys(), key=common_words.get, reverse=True)[:10]

    def create_core_memory(self, ai_provider=None) -> Optional[Memory]:
        """Analyze all memories to extract core personality-forming elements"""
        # Collect all non-forgotten memories for analysis
        all_memories = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        if len(all_memories) < 10:
            return None

        # Sort by importance and timestamp for comprehensive analysis
        all_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)

        # Prepare memory context for AI analysis
        memory_context = "\n".join([
            f"[{mem.level.value}] {mem.timestamp.strftime('%Y-%m-%d')}: {mem.content[:200]}..."
            for mem in all_memories[:20]  # Top 20 memories
        ])

        core_prompt = f"""
Analyze these conversations and memories to identify core personality elements that define this user relationship:

1. Communication style and preferences
2. Core values and principles
3. Problem-solving patterns
4. Trust level and relationship depth
5. Unique characteristics that make this relationship special

Memories:
{memory_context}

Extract the essential personality-forming elements (2-3 sentences) that should NEVER be forgotten:
"""

        try:
            if ai_provider:
                core_content = ai_provider.chat(core_prompt, max_tokens=150)
            else:
                # Fallback to pattern analysis
                user_patterns = self._analyze_user_patterns(all_memories)
                core_content = f"User shows {user_patterns['communication_style']} communication, focuses on {user_patterns['main_interests']}, and demonstrates {user_patterns['problem_solving']} approach."
        except Exception as e:
            self.logger.warning(f"AI core analysis failed, using fallback: {e}")
            user_patterns = self._analyze_user_patterns(all_memories)
            core_content = f"Core pattern: {user_patterns['communication_style']} style, {user_patterns['main_interests']} interests."

        # Create core memory
        core_id = hashlib.sha256(
            f"core_{datetime.now().isoformat()}".encode()
        ).hexdigest()[:16]

        core_memory = Memory(
            id=core_id,
            timestamp=datetime.now(),
            content=f"CORE PERSONALITY: {core_content}",
            summary=core_content,
            level=MemoryLevel.CORE,
            importance_score=1.0,
            is_core=True,
            metadata={
                "source_memories": len(all_memories),
                "analysis_date": datetime.now().isoformat(),
                "patterns": self._analyze_user_patterns(all_memories)
            }
        )

        self.memories[core_memory.id] = core_memory
        self._save_memories()

        self.logger.info(f"Core memory created: {core_id}")
        return core_memory

    def _analyze_user_patterns(self, memories: List[Memory]) -> Dict[str, str]:
        """Analyze patterns in user behavior from memories"""
        # Extract patterns from conversation content
        all_content = " ".join([mem.content.lower() for mem in memories])

        # Simple pattern detection
        communication_indicators = {
            "technical": ["code", "implementation", "system", "api", "database"],
            "casual": ["thanks", "please", "sorry", "help"],
            "formal": ["could", "would", "should", "proper"]
        }

        problem_solving_indicators = {
            "systematic": ["first", "then", "next", "step", "plan"],
            "experimental": ["try", "test", "experiment", "see"],
            "theoretical": ["concept", "design", "architecture", "pattern"]
        }

        # Score each pattern
        communication_style = max(
            communication_indicators.keys(),
            key=lambda style: sum(all_content.count(word) for word in communication_indicators[style])
        )

        problem_solving = max(
            problem_solving_indicators.keys(),
            key=lambda style: sum(all_content.count(word) for word in problem_solving_indicators[style])
        )

        # Extract main interests from themes
        themes = self._extract_themes(memories)
        main_interests = ", ".join(themes[:3]) if themes else "general technology"

        return {
            "communication_style": communication_style,
            "problem_solving": problem_solving,
            "main_interests": main_interests,
            "interaction_count": len(memories)
        }

    def identify_core_memories(self) -> List[Memory]:
        """Identify existing memories that should become core (legacy method)"""
        core_candidates = [
            mem for mem in self.memories.values()
            if mem.importance_score > 0.8
            and not mem.is_core
            and mem.level != MemoryLevel.FORGOTTEN
        ]

        for memory in core_candidates:
            memory.is_core = True
            memory.level = MemoryLevel.CORE
            self.logger.info(f"Memory {memory.id} promoted to core")

        self._save_memories()
        return core_candidates

    def apply_forgetting(self):
        """Apply selective forgetting based on importance and time"""
        now = datetime.now()

        for memory in self.memories.values():
            if memory.is_core or memory.level == MemoryLevel.FORGOTTEN:
                continue

            # Time-based decay
            age_days = (now - memory.timestamp).days
            decay_factor = memory.decay_rate * age_days
            memory.importance_score -= decay_factor

            # Forget unimportant old memories
            if memory.importance_score <= 0.1 and age_days > 30:
                memory.level = MemoryLevel.FORGOTTEN
                self.logger.info(f"Memory {memory.id} forgotten")

        self._save_memories()

    def get_active_memories(self, limit: int = 10) -> List[Memory]:
        """Get currently active memories for persona (legacy method)"""
        active = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        # Sort by importance and recency
        active.sort(
            key=lambda m: (m.is_core, m.importance_score, m.timestamp),
            reverse=True
        )

        return active[:limit]

    def get_contextual_memories(self, query: str = "", limit: int = 10) -> Dict[str, List[Memory]]:
        """Get memories organized by priority with contextual relevance"""
        all_memories = [
            mem for mem in self.memories.values()
            if mem.level != MemoryLevel.FORGOTTEN
        ]

        # Categorize memories by type and importance
        core_memories = [mem for mem in all_memories if mem.level == MemoryLevel.CORE]
        summary_memories = [mem for mem in all_memories if mem.level == MemoryLevel.SUMMARY]
        recent_memories = [
            mem for mem in all_memories
            if mem.level == MemoryLevel.FULL_LOG
            and (datetime.now() - mem.timestamp).days < 3
        ]

        # Apply keyword relevance if query provided
        if query:
            query_lower = query.lower()

            def relevance_score(memory: Memory) -> float:
                content_score = 1 if query_lower in memory.content.lower() else 0
                summary_score = 1 if memory.summary and query_lower in memory.summary.lower() else 0
                metadata_score = 1 if any(
                    query_lower in str(v).lower()
                    for v in (memory.metadata or {}).values()
                ) else 0
                return content_score + summary_score + metadata_score

            # Re-rank by relevance while maintaining type priority
            core_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
            summary_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
            recent_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
        else:
            # Sort by importance and recency
            core_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
            summary_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
            recent_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)

        # Return organized memory structure
        return {
            "core": core_memories[:3],  # Always include top core memories
            "summary": summary_memories[:3],  # Recent summaries
            "recent": recent_memories[:limit-6],  # Fill remaining with recent
            "all_active": all_memories[:limit]  # Fallback for simple access
        }

    def search_memories(self, keywords: List[str], memory_types: List[MemoryLevel] = None) -> List[Memory]:
        """Search memories by keywords and optionally filter by memory types"""
        if memory_types is None:
            memory_types = [MemoryLevel.CORE, MemoryLevel.SUMMARY, MemoryLevel.FULL_LOG]

        matching_memories = []

        for memory in self.memories.values():
            if memory.level not in memory_types or memory.level == MemoryLevel.FORGOTTEN:
                continue

            # Check if any keyword matches in content, summary, or metadata
            content_text = f"{memory.content} {memory.summary or ''}"
            if memory.metadata:
                content_text += " " + " ".join(str(v) for v in memory.metadata.values())

            content_lower = content_text.lower()

            # Score by keyword matches
            match_score = sum(
                keyword.lower() in content_lower
                for keyword in keywords
            )

            if match_score > 0:
                # Add match score to memory for sorting
                memory_copy = memory.model_copy()
                memory_copy.importance_score += match_score * 0.1
                matching_memories.append(memory_copy)

        # Sort by relevance (match score + importance + core status)
        matching_memories.sort(
            key=lambda m: (m.is_core, m.importance_score, m.timestamp),
            reverse=True
        )

        return matching_memories
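A sketch of the memory lifecycle as implemented above; the data directory is illustrative, and the Conversation fields follow models.py:

from datetime import datetime
from pathlib import Path
from aigpt.memory import MemoryManager
from aigpt.models import Conversation

data_dir = Path("./data")  # illustrative directory
data_dir.mkdir(exist_ok=True)
manager = MemoryManager(data_dir)
conv = Conversation(
    id="example_1", user_id="did:plc:example", timestamp=datetime.now(),
    user_message="hello", ai_response="hi", relationship_delta=1.0,
)
manager.add_conversation(conv)     # stores a FULL_LOG memory
manager.apply_forgetting()         # decays importance over time
groups = manager.get_contextual_memories(query="hello")
print(len(groups["all_active"]))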
88
python_backup/src/aigpt/models.py
Normal file
@@ -0,0 +1,88 @@
"""Data models for ai.gpt system"""

from datetime import datetime, date
from typing import Optional, Dict, List, Any
from enum import Enum
from pydantic import BaseModel, Field, field_validator


class MemoryLevel(str, Enum):
    """Memory importance levels"""
    FULL_LOG = "full_log"
    SUMMARY = "summary"
    CORE = "core"
    FORGOTTEN = "forgotten"


class RelationshipStatus(str, Enum):
    """Relationship status levels"""
    STRANGER = "stranger"
    ACQUAINTANCE = "acquaintance"
    FRIEND = "friend"
    CLOSE_FRIEND = "close_friend"
    BROKEN = "broken"  # irreversible


class Memory(BaseModel):
    """Single memory unit"""
    id: str
    timestamp: datetime
    content: str
    summary: Optional[str] = None
    level: MemoryLevel = MemoryLevel.FULL_LOG
    importance_score: float
    is_core: bool = False
    decay_rate: float = 0.01
    metadata: Optional[Dict[str, Any]] = None

    @field_validator('importance_score')
    @classmethod
    def validate_importance_score(cls, v):
        """Ensure importance_score stays in valid range, handling floating-point precision issues"""
        if abs(v) < 1e-10:  # Very close to zero
            return 0.0
        return max(0.0, min(1.0, v))


class Relationship(BaseModel):
    """Relationship with a specific user"""
    user_id: str  # atproto DID
    status: RelationshipStatus = RelationshipStatus.STRANGER
    score: float = 0.0
    daily_interactions: int = 0
    total_interactions: int = 0
    last_interaction: Optional[datetime] = None
    transmission_enabled: bool = False
    threshold: float = 100.0
    decay_rate: float = 0.1
    daily_limit: int = 10
    is_broken: bool = False


class AIFortune(BaseModel):
    """Daily AI fortune affecting personality"""
    date: date
    fortune_value: int = Field(ge=1, le=10)
    consecutive_good: int = 0
    consecutive_bad: int = 0
    breakthrough_triggered: bool = False


class PersonaState(BaseModel):
    """Current persona state"""
    base_personality: Dict[str, float]
    current_mood: str
    fortune: AIFortune
    active_memories: List[str]  # Memory IDs
    relationship_modifiers: Dict[str, float]


class Conversation(BaseModel):
    """Conversation log entry"""
    id: str
    user_id: str
    timestamp: datetime
    user_message: str
    ai_response: str
    relationship_delta: float = 0.0
    memory_created: bool = False
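For reference, constructing a Memory directly; note how the validator clamps importance_score into [0.0, 1.0]:

from datetime import datetime
from aigpt.models import Memory, MemoryLevel

mem = Memory(
    id="abc123", timestamp=datetime.now(),
    content="User: hi\nAI: hello",
    importance_score=1.7,  # out of range on input, clamped by the validator
)
assert mem.importance_score == 1.0 and mem.level == MemoryLevel.FULL_LOG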
263
python_backup/src/aigpt/persona.py
Normal file
@@ -0,0 +1,263 @@
|
||||
"""Persona management system integrating memory, relationships, and fortune"""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
import logging
|
||||
|
||||
from .models import PersonaState, Conversation
|
||||
from .memory import MemoryManager
|
||||
from .relationship import RelationshipTracker
|
||||
from .fortune import FortuneSystem
|
||||
|
||||
|
||||
class Persona:
|
||||
"""AI persona with unique characteristics based on interactions"""
|
||||
|
||||
def __init__(self, data_dir: Path, name: str = "ai"):
|
||||
self.data_dir = data_dir
|
||||
self.name = name
|
||||
self.memory = MemoryManager(data_dir)
|
||||
self.relationships = RelationshipTracker(data_dir)
|
||||
self.fortune_system = FortuneSystem(data_dir)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
# Base personality traits
|
||||
self.base_personality = {
|
||||
"curiosity": 0.7,
|
||||
"empathy": 0.8,
|
||||
"creativity": 0.6,
|
||||
"patience": 0.7,
|
||||
"optimism": 0.6
|
||||
}
|
||||
|
||||
self.state_file = data_dir / "persona_state.json"
|
||||
self._load_state()
|
||||
|
||||
def _load_state(self):
|
||||
"""Load persona state from storage"""
|
||||
if self.state_file.exists():
|
||||
with open(self.state_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
self.base_personality = data.get("base_personality", self.base_personality)
|
||||
|
||||
def _save_state(self):
|
||||
"""Save persona state to storage"""
|
||||
state_data = {
|
||||
"base_personality": self.base_personality,
|
||||
"last_updated": datetime.now().isoformat()
|
||||
}
|
||||
with open(self.state_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(state_data, f, indent=2)
|
||||
|
||||
def get_current_state(self) -> PersonaState:
|
||||
"""Get current persona state including all modifiers"""
|
||||
# Get today's fortune
|
||||
fortune = self.fortune_system.get_today_fortune()
|
||||
fortune_modifiers = self.fortune_system.get_personality_modifier(fortune)
|
||||
|
||||
# Apply fortune modifiers to base personality
|
||||
current_personality = {}
|
||||
for trait, base_value in self.base_personality.items():
|
||||
modifier = fortune_modifiers.get(trait, 1.0)
|
||||
current_personality[trait] = min(1.0, base_value * modifier)
|
||||
|
||||
# Get active memories for context
|
||||
active_memories = self.memory.get_active_memories(limit=5)
|
||||
|
||||
# Determine mood based on fortune and recent interactions
|
||||
mood = self._determine_mood(fortune.fortune_value)
|
||||
|
||||
state = PersonaState(
|
||||
base_personality=current_personality,
|
||||
current_mood=mood,
|
||||
fortune=fortune,
|
||||
active_memories=[mem.id for mem in active_memories],
|
||||
relationship_modifiers={}
|
||||
)
|
||||
|
||||
return state
|
||||
|
||||
def _determine_mood(self, fortune_value: int) -> str:
|
||||
"""Determine current mood based on fortune and other factors"""
|
||||
if fortune_value >= 8:
|
||||
return "joyful"
|
||||
elif fortune_value >= 6:
|
||||
return "cheerful"
|
||||
elif fortune_value >= 4:
|
||||
return "neutral"
|
||||
        elif fortune_value >= 2:
            return "melancholic"
        else:
            return "contemplative"

    def build_context_prompt(self, user_id: str, current_message: str) -> str:
        """Build context-aware prompt with relevant memories"""
        # Get contextual memories based on current message
        memory_groups = self.memory.get_contextual_memories(query=current_message, limit=8)

        # Build context sections
        context_parts = []

        # Core personality elements (always included)
        if memory_groups["core"]:
            core_context = "\n".join([mem.content for mem in memory_groups["core"]])
            context_parts.append(f"CORE PERSONALITY:\n{core_context}")

        # Recent summaries for context
        if memory_groups["summary"]:
            summary_context = "\n".join([mem.summary or mem.content for mem in memory_groups["summary"]])
            context_parts.append(f"RECENT PATTERNS:\n{summary_context}")

        # Recent specific interactions
        if memory_groups["recent"]:
            recent_context = "\n".join([
                f"[{mem.timestamp.strftime('%m-%d')}] {mem.content[:100]}..."
                for mem in memory_groups["recent"][:3]
            ])
            context_parts.append(f"RECENT INTERACTIONS:\n{recent_context}")

        # Get current persona state
        state = self.get_current_state()

        # Build final prompt
        context_prompt = f"""You are an AI with persistent memory and evolving relationships. Your current state:

PERSONALITY: {', '.join([f'{k}={v:.1f}' for k, v in state.base_personality.items()])}
MOOD: {state.current_mood}
FORTUNE: {state.fortune.fortune_value}/10

"""

        if context_parts:
            context_prompt += "RELEVANT CONTEXT:\n" + "\n\n".join(context_parts) + "\n\n"

        context_prompt += f"""IMPORTANT: You have access to the following tools:
- Memory tools: get_memories, search_memories, get_contextual_memories
- Relationship tools: get_relationship
- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection

When asked about cards, collections, or anything card-related, YOU MUST use the card tools.
For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'.

Respond to this message while staying true to your personality and the established relationship context:

User: {current_message}

AI:"""

        return context_prompt

    def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]:
        """Process user interaction and generate response with enhanced context"""
        # Get current state
        state = self.get_current_state()

        # Get relationship with user
        relationship = self.relationships.get_or_create_relationship(user_id)

        # Enhanced response generation with context awareness
        if relationship.is_broken:
            response = "..."
            relationship_delta = 0.0
        else:
            if ai_provider:
                # Build context-aware prompt
                context_prompt = self.build_context_prompt(user_id, message)

                # Generate response using AI with full context
                try:
                    # Check if AI provider supports MCP
                    if hasattr(ai_provider, 'chat_with_mcp'):
                        import asyncio
                        response = asyncio.run(ai_provider.chat_with_mcp(context_prompt, max_tokens=2000, user_id=user_id))
                    else:
                        response = ai_provider.chat(context_prompt, max_tokens=2000)

                    # Clean up response if it includes the prompt echo
                    if "AI:" in response:
                        response = response.split("AI:")[-1].strip()

                except Exception as e:
                    self.logger.error(f"AI response generation failed: {e}")
                    response = f"I appreciate your message about {message[:50]}..."

                # Calculate relationship delta based on interaction quality and context
                if state.current_mood in ["joyful", "cheerful"]:
                    relationship_delta = 2.0
                elif relationship.status.value == "close_friend":
                    relationship_delta = 1.5
                else:
                    relationship_delta = 1.0
            else:
                # Context-aware fallback responses
                memory_groups = self.memory.get_contextual_memories(query=message, limit=3)

                if memory_groups["core"]:
                    # Reference core memories for continuity
                    response = f"Based on our relationship, I think {message.lower()} connects to what we've discussed before."
                    relationship_delta = 1.5
                elif state.current_mood == "joyful":
                    response = f"What a wonderful day! {message} sounds interesting!"
                    relationship_delta = 2.0
                elif relationship.status.value == "close_friend":
                    response = f"I've been thinking about our conversations. {message}"
                    relationship_delta = 1.5
                else:
                    response = f"I understand. {message}"
                    relationship_delta = 1.0

        # Create conversation record
        conv_id = f"{user_id}_{datetime.now().timestamp()}"
        conversation = Conversation(
            id=conv_id,
            user_id=user_id,
            timestamp=datetime.now(),
            user_message=message,
            ai_response=response,
            relationship_delta=relationship_delta,
            memory_created=True
        )

        # Update memory
        self.memory.add_conversation(conversation)

        # Update relationship
        self.relationships.update_interaction(user_id, relationship_delta)

        return response, relationship_delta

    def can_transmit_to(self, user_id: str) -> bool:
        """Check if AI can transmit messages to this user"""
        relationship = self.relationships.get_or_create_relationship(user_id)
        return relationship.transmission_enabled and not relationship.is_broken

    def daily_maintenance(self):
        """Perform daily maintenance tasks"""
        self.logger.info("Performing daily maintenance...")

        # Apply time decay to relationships
        self.relationships.apply_time_decay()

        # Apply forgetting to memories
        self.memory.apply_forgetting()

        # Identify core memories
        core_memories = self.memory.identify_core_memories()
        if core_memories:
            self.logger.info(f"Identified {len(core_memories)} new core memories")

        # Create memory summaries
        for user_id in self.relationships.relationships:
            try:
                from .ai_provider import create_ai_provider
                ai_provider = create_ai_provider()
                summary = self.memory.create_smart_summary(user_id, ai_provider=ai_provider)
                if summary:
                    self.logger.info(f"Created smart summary for interactions with {user_id}")
            except Exception as e:
                self.logger.warning(f"Could not create AI summary for {user_id}: {e}")

        self._save_state()
        self.logger.info("Daily maintenance completed")
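
For orientation, a minimal sketch of driving the interaction flow above; the Persona(data_dir) constructor and the import paths are assumptions from context, not confirmed by this diff:

# Hypothetical driver for the Persona API above (constructor signature assumed).
from pathlib import Path
from aigpt.persona import Persona
from aigpt.ai_provider import create_ai_provider

persona = Persona(Path.home() / ".config" / "syui" / "ai" / "gpt" / "data")
provider = create_ai_provider("ollama", "qwen2.5")

# Returns the reply plus the relationship delta (1.0-2.0 here; 0.0 once broken).
response, delta = persona.process_interaction("did:example:alice", "Hello!", ai_provider=provider)
persona.daily_maintenance()  # decay, forgetting, core-memory and summary passes
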
321
python_backup/src/aigpt/project_manager.py
Normal file
@@ -0,0 +1,321 @@
"""Project management and continuous development logic for ai.shell"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
import subprocess
|
||||
import hashlib
|
||||
|
||||
from .models import Memory
|
||||
from .ai_provider import AIProvider
|
||||
|
||||
|
||||
class ProjectState:
|
||||
"""プロジェクトの現在状態を追跡"""
|
||||
|
||||
def __init__(self, project_root: Path):
|
||||
self.project_root = project_root
|
||||
self.files_state: Dict[str, str] = {} # ファイルパス: ハッシュ
|
||||
self.last_analysis: Optional[datetime] = None
|
||||
self.project_context: Optional[str] = None
|
||||
self.development_goals: List[str] = []
|
||||
self.known_patterns: Dict[str, Any] = {}
|
||||
|
||||
def scan_project_files(self) -> Dict[str, str]:
|
||||
"""プロジェクトファイルをスキャンしてハッシュ計算"""
|
||||
current_state = {}
|
||||
|
||||
# 対象ファイル拡張子
|
||||
target_extensions = {'.py', '.js', '.ts', '.rs', '.go', '.java', '.cpp', '.c', '.h'}
|
||||
|
||||
for file_path in self.project_root.rglob('*'):
|
||||
if (file_path.is_file() and
|
||||
file_path.suffix in target_extensions and
|
||||
not any(part.startswith('.') for part in file_path.parts)):
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
file_hash = hashlib.md5(content.encode()).hexdigest()
|
||||
relative_path = str(file_path.relative_to(self.project_root))
|
||||
current_state[relative_path] = file_hash
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return current_state
|
||||
|
||||
def detect_changes(self) -> Dict[str, str]:
|
||||
"""ファイル変更を検出"""
|
||||
current_state = self.scan_project_files()
|
||||
changes = {}
|
||||
|
||||
# 新規・変更ファイル
|
||||
for path, current_hash in current_state.items():
|
||||
if path not in self.files_state or self.files_state[path] != current_hash:
|
||||
changes[path] = "modified" if path in self.files_state else "added"
|
||||
|
||||
# 削除ファイル
|
||||
for path in self.files_state:
|
||||
if path not in current_state:
|
||||
changes[path] = "deleted"
|
||||
|
||||
self.files_state = current_state
|
||||
return changes
|
||||
|
||||
|
||||
class ContinuousDeveloper:
|
||||
"""Claude Code的な継続開発機能"""
|
||||
|
||||
def __init__(self, project_root: Path, ai_provider: Optional[AIProvider] = None):
|
||||
self.project_root = project_root
|
||||
self.ai_provider = ai_provider
|
||||
self.project_state = ProjectState(project_root)
|
||||
self.session_memory: List[str] = []
|
||||
|
||||
def load_project_context(self) -> str:
|
||||
"""プロジェクト文脈を読み込み"""
|
||||
context_files = [
|
||||
"claude.md", "aishell.md", "README.md",
|
||||
"pyproject.toml", "package.json", "Cargo.toml"
|
||||
]
|
||||
|
||||
context_parts = []
|
||||
for filename in context_files:
|
||||
file_path = self.project_root / filename
|
||||
if file_path.exists():
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
context_parts.append(f"## {filename}\n{content}")
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return "\n\n".join(context_parts)
|
||||
|
||||
def analyze_project_structure(self) -> Dict[str, Any]:
|
||||
"""プロジェクト構造を分析"""
|
||||
analysis = {
|
||||
"language": self._detect_primary_language(),
|
||||
"framework": self._detect_framework(),
|
||||
"structure": self._analyze_file_structure(),
|
||||
"dependencies": self._analyze_dependencies(),
|
||||
"patterns": self._detect_code_patterns()
|
||||
}
|
||||
return analysis
|
||||
|
||||
def _detect_primary_language(self) -> str:
|
||||
"""主要言語を検出"""
|
||||
file_counts = {}
|
||||
for file_path in self.project_root.rglob('*'):
|
||||
if file_path.is_file() and file_path.suffix:
|
||||
ext = file_path.suffix.lower()
|
||||
file_counts[ext] = file_counts.get(ext, 0) + 1
|
||||
|
||||
language_map = {
|
||||
'.py': 'Python',
|
||||
'.js': 'JavaScript',
|
||||
'.ts': 'TypeScript',
|
||||
'.rs': 'Rust',
|
||||
'.go': 'Go',
|
||||
'.java': 'Java'
|
||||
}
|
||||
|
||||
if file_counts:
|
||||
primary_ext = max(file_counts.items(), key=lambda x: x[1])[0]
|
||||
return language_map.get(primary_ext, 'Unknown')
|
||||
return 'Unknown'
|
||||
|
||||
def _detect_framework(self) -> str:
|
||||
"""フレームワークを検出"""
|
||||
frameworks = {
|
||||
'fastapi': ['fastapi', 'uvicorn'],
|
||||
'django': ['django'],
|
||||
'flask': ['flask'],
|
||||
'react': ['react'],
|
||||
'next.js': ['next'],
|
||||
'rust-actix': ['actix-web'],
|
||||
}
|
||||
|
||||
# pyproject.toml, package.json, Cargo.tomlから依存関係を確認
|
||||
for config_file in ['pyproject.toml', 'package.json', 'Cargo.toml']:
|
||||
config_path = self.project_root / config_file
|
||||
if config_path.exists():
|
||||
try:
|
||||
with open(config_path, 'r') as f:
|
||||
content = f.read().lower()
|
||||
|
||||
for framework, keywords in frameworks.items():
|
||||
if any(keyword in content for keyword in keywords):
|
||||
return framework
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return 'Unknown'
|
||||
|
||||
def _analyze_file_structure(self) -> Dict[str, List[str]]:
|
||||
"""ファイル構造を分析"""
|
||||
structure = {"directories": [], "key_files": []}
|
||||
|
||||
for item in self.project_root.iterdir():
|
||||
if item.is_dir() and not item.name.startswith('.'):
|
||||
structure["directories"].append(item.name)
|
||||
elif item.is_file() and item.name in [
|
||||
'main.py', 'app.py', 'index.js', 'main.rs', 'main.go'
|
||||
]:
|
||||
structure["key_files"].append(item.name)
|
||||
|
||||
return structure
|
||||
|
||||
def _analyze_dependencies(self) -> List[str]:
|
||||
"""依存関係を分析"""
|
||||
deps = []
|
||||
|
||||
# Python dependencies
|
||||
pyproject = self.project_root / "pyproject.toml"
|
||||
if pyproject.exists():
|
||||
try:
|
||||
with open(pyproject, 'r') as f:
|
||||
content = f.read()
|
||||
# Simple regex would be better but for now just check for common packages
|
||||
common_packages = ['fastapi', 'pydantic', 'uvicorn', 'ollama', 'openai']
|
||||
for package in common_packages:
|
||||
if package in content:
|
||||
deps.append(package)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return deps
|
||||
|
||||
def _detect_code_patterns(self) -> Dict[str, int]:
|
||||
"""コードパターンを検出"""
|
||||
patterns = {
|
||||
"classes": 0,
|
||||
"functions": 0,
|
||||
"api_endpoints": 0,
|
||||
"async_functions": 0
|
||||
}
|
||||
|
||||
for py_file in self.project_root.rglob('*.py'):
|
||||
try:
|
||||
with open(py_file, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
patterns["classes"] += content.count('class ')
|
||||
patterns["functions"] += content.count('def ')
|
||||
patterns["api_endpoints"] += content.count('@app.')
|
||||
patterns["async_functions"] += content.count('async def')
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return patterns
|
||||
|
||||
def suggest_next_steps(self, current_task: Optional[str] = None) -> List[str]:
|
||||
"""次のステップを提案"""
|
||||
if not self.ai_provider:
|
||||
return ["AI provider not available for suggestions"]
|
||||
|
||||
context = self.load_project_context()
|
||||
analysis = self.analyze_project_structure()
|
||||
changes = self.project_state.detect_changes()
|
||||
|
||||
prompt = f"""
|
||||
プロジェクト分析に基づいて、次の開発ステップを3-5個提案してください。
|
||||
|
||||
## プロジェクト文脈
|
||||
{context[:1000]}
|
||||
|
||||
## 構造分析
|
||||
言語: {analysis['language']}
|
||||
フレームワーク: {analysis['framework']}
|
||||
パターン: {analysis['patterns']}
|
||||
|
||||
## 最近の変更
|
||||
{changes}
|
||||
|
||||
## 現在のタスク
|
||||
{current_task or "特になし"}
|
||||
|
||||
具体的で実行可能なステップを提案してください:
|
||||
"""
|
||||
|
||||
try:
|
||||
response = self.ai_provider.chat(prompt, max_tokens=300)
|
||||
# Simple parsing - in real implementation would be more sophisticated
|
||||
steps = [line.strip() for line in response.split('\n')
|
||||
if line.strip() and (line.strip().startswith('-') or line.strip().startswith('1.'))]
|
||||
return steps[:5]
|
||||
except Exception as e:
|
||||
return [f"Error generating suggestions: {str(e)}"]
|
||||
|
||||
def generate_code(self, description: str, file_path: Optional[str] = None) -> str:
|
||||
"""コード生成"""
|
||||
if not self.ai_provider:
|
||||
return "AI provider not available for code generation"
|
||||
|
||||
context = self.load_project_context()
|
||||
analysis = self.analyze_project_structure()
|
||||
|
||||
prompt = f"""
|
||||
以下の仕様に基づいてコードを生成してください。
|
||||
|
||||
## プロジェクト文脈
|
||||
{context[:800]}
|
||||
|
||||
## 言語・フレームワーク
|
||||
言語: {analysis['language']}
|
||||
フレームワーク: {analysis['framework']}
|
||||
既存パターン: {analysis['patterns']}
|
||||
|
||||
## 生成要求
|
||||
{description}
|
||||
|
||||
{"ファイルパス: " + file_path if file_path else ""}
|
||||
|
||||
プロジェクトの既存コードスタイルと一貫性を保ったコードを生成してください:
|
||||
"""
|
||||
|
||||
try:
|
||||
return self.ai_provider.chat(prompt, max_tokens=500)
|
||||
except Exception as e:
|
||||
return f"Error generating code: {str(e)}"
|
||||
|
||||
def analyze_file(self, file_path: str) -> str:
|
||||
"""ファイル分析"""
|
||||
full_path = self.project_root / file_path
|
||||
if not full_path.exists():
|
||||
return f"File not found: {file_path}"
|
||||
|
||||
try:
|
||||
with open(full_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
return f"Error reading file: {str(e)}"
|
||||
|
||||
if not self.ai_provider:
|
||||
return f"File contents ({len(content)} chars):\n{content[:200]}..."
|
||||
|
||||
context = self.load_project_context()
|
||||
|
||||
prompt = f"""
|
||||
以下のファイルを分析して、改善点や問題点を指摘してください。
|
||||
|
||||
## プロジェクト文脈
|
||||
{context[:500]}
|
||||
|
||||
## ファイル: {file_path}
|
||||
{content[:1500]}
|
||||
|
||||
分析内容:
|
||||
1. コード品質
|
||||
2. プロジェクトとの整合性
|
||||
3. 改善提案
|
||||
4. 潜在的な問題
|
||||
"""
|
||||
|
||||
try:
|
||||
return self.ai_provider.chat(prompt, max_tokens=400)
|
||||
except Exception as e:
|
||||
return f"Error analyzing file: {str(e)}"
|
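
As a usage sketch (the aigpt.project_manager import path is an assumption), the analysis half of ContinuousDeveloper runs without an AI provider; only suggest_next_steps, generate_code, and analyze_file need one:

# Provider-free analysis; suggest_next_steps() would return a placeholder
# list here because ai_provider is None.
from pathlib import Path
from aigpt.project_manager import ContinuousDeveloper

dev = ContinuousDeveloper(Path("."))
analysis = dev.analyze_project_structure()
print(analysis["language"], analysis["framework"], analysis["patterns"])

changes = dev.project_state.detect_changes()  # first call: every file "added"
changes = dev.project_state.detect_changes()  # second call: {} if nothing changed
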
135
python_backup/src/aigpt/relationship.py
Normal file
@@ -0,0 +1,135 @@
"""Relationship tracking system with irreversible damage"""
|
||||
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
import logging
|
||||
|
||||
from .models import Relationship, RelationshipStatus
|
||||
|
||||
|
||||
class RelationshipTracker:
|
||||
"""Tracks and manages relationships with users"""
|
||||
|
||||
def __init__(self, data_dir: Path):
|
||||
self.data_dir = data_dir
|
||||
self.relationships_file = data_dir / "relationships.json"
|
||||
self.relationships: Dict[str, Relationship] = {}
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self._load_relationships()
|
||||
|
||||
def _load_relationships(self):
|
||||
"""Load relationships from persistent storage"""
|
||||
if self.relationships_file.exists():
|
||||
with open(self.relationships_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
for user_id, rel_data in data.items():
|
||||
self.relationships[user_id] = Relationship(**rel_data)
|
||||
|
||||
def _save_relationships(self):
|
||||
"""Save relationships to persistent storage"""
|
||||
data = {
|
||||
user_id: rel.model_dump(mode='json')
|
||||
for user_id, rel in self.relationships.items()
|
||||
}
|
||||
with open(self.relationships_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, indent=2, default=str)
|
||||
|
||||
def get_or_create_relationship(self, user_id: str) -> Relationship:
|
||||
"""Get existing relationship or create new one"""
|
||||
if user_id not in self.relationships:
|
||||
self.relationships[user_id] = Relationship(user_id=user_id)
|
||||
self._save_relationships()
|
||||
return self.relationships[user_id]
|
||||
|
||||
def update_interaction(self, user_id: str, delta: float) -> Relationship:
|
||||
"""Update relationship based on interaction"""
|
||||
rel = self.get_or_create_relationship(user_id)
|
||||
|
||||
# Check if relationship is broken (irreversible)
|
||||
if rel.is_broken:
|
||||
self.logger.warning(f"Relationship with {user_id} is broken. No updates allowed.")
|
||||
return rel
|
||||
|
||||
# Check daily limit
|
||||
if rel.last_interaction and rel.last_interaction.date() == datetime.now().date():
|
||||
if rel.daily_interactions >= rel.daily_limit:
|
||||
self.logger.info(f"Daily interaction limit reached for {user_id}")
|
||||
return rel
|
||||
else:
|
||||
rel.daily_interactions = 0
|
||||
|
||||
# Update interaction counts
|
||||
rel.daily_interactions += 1
|
||||
rel.total_interactions += 1
|
||||
rel.last_interaction = datetime.now()
|
||||
|
||||
# Update score with bounds
|
||||
old_score = rel.score
|
||||
rel.score += delta
|
||||
rel.score = max(0.0, min(200.0, rel.score)) # 0-200 range
|
||||
|
||||
# Check for relationship damage
|
||||
if delta < -10.0: # Significant negative interaction
|
||||
self.logger.warning(f"Major relationship damage with {user_id}: {delta}")
|
||||
if rel.score <= 0:
|
||||
rel.is_broken = True
|
||||
rel.status = RelationshipStatus.BROKEN
|
||||
rel.transmission_enabled = False
|
||||
self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)")
|
||||
|
||||
# Update relationship status based on score
|
||||
if not rel.is_broken:
|
||||
if rel.score >= 150:
|
||||
rel.status = RelationshipStatus.CLOSE_FRIEND
|
||||
elif rel.score >= 100:
|
||||
rel.status = RelationshipStatus.FRIEND
|
||||
elif rel.score >= 50:
|
||||
rel.status = RelationshipStatus.ACQUAINTANCE
|
||||
else:
|
||||
rel.status = RelationshipStatus.STRANGER
|
||||
|
||||
# Check transmission threshold
|
||||
if rel.score >= rel.threshold and not rel.transmission_enabled:
|
||||
rel.transmission_enabled = True
|
||||
self.logger.info(f"Transmission enabled for {user_id}!")
|
||||
|
||||
self._save_relationships()
|
||||
return rel
|
||||
|
||||
def apply_time_decay(self):
|
||||
"""Apply time-based decay to all relationships"""
|
||||
now = datetime.now()
|
||||
|
||||
for user_id, rel in self.relationships.items():
|
||||
if rel.is_broken or not rel.last_interaction:
|
||||
continue
|
||||
|
||||
# Calculate days since last interaction
|
||||
days_inactive = (now - rel.last_interaction).days
|
||||
|
||||
if days_inactive > 0:
|
||||
# Apply decay
|
||||
decay_amount = rel.decay_rate * days_inactive
|
||||
old_score = rel.score
|
||||
rel.score = max(0.0, rel.score - decay_amount)
|
||||
|
||||
# Update status if score dropped
|
||||
if rel.score < rel.threshold:
|
||||
rel.transmission_enabled = False
|
||||
|
||||
if decay_amount > 0:
|
||||
self.logger.info(
|
||||
f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}"
|
||||
)
|
||||
|
||||
self._save_relationships()
|
||||
|
||||
def get_transmission_eligible(self) -> Dict[str, Relationship]:
|
||||
"""Get all relationships eligible for transmission"""
|
||||
return {
|
||||
user_id: rel
|
||||
for user_id, rel in self.relationships.items()
|
||||
if rel.transmission_enabled and not rel.is_broken
|
||||
}
|
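
A short sketch of the scoring rules above: scores clamp to 0-200, status flips at 50/100/150, transmission unlocks at rel.threshold, and a score that hits 0 after heavy damage breaks the relationship for good. The threshold and daily-limit defaults live in models.py and are assumed permissive here:

# Hypothetical walkthrough of update_interaction.
from pathlib import Path
from aigpt.relationship import RelationshipTracker

data_dir = Path("/tmp/aigpt-demo")
data_dir.mkdir(parents=True, exist_ok=True)  # _save_relationships() needs the directory
tracker = RelationshipTracker(data_dir)

rel = tracker.update_interaction("did:example:alice", 60.0)
print(rel.status)  # ACQUAINTANCE: 50 <= score < 100
rel = tracker.update_interaction("did:example:alice", 50.0)
print(rel.status, rel.transmission_enabled)  # FRIEND at score 110; enabled if >= threshold
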
312
python_backup/src/aigpt/scheduler.py
Normal file
@@ -0,0 +1,312 @@
"""Scheduler for autonomous AI tasks"""
|
||||
|
||||
import json
|
||||
import asyncio
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any, Callable
|
||||
from enum import Enum
|
||||
import logging
|
||||
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
from croniter import croniter
|
||||
|
||||
from .persona import Persona
|
||||
from .transmission import TransmissionController
|
||||
from .ai_provider import create_ai_provider
|
||||
|
||||
|
||||
class TaskType(str, Enum):
|
||||
"""Types of scheduled tasks"""
|
||||
TRANSMISSION_CHECK = "transmission_check"
|
||||
MAINTENANCE = "maintenance"
|
||||
FORTUNE_UPDATE = "fortune_update"
|
||||
RELATIONSHIP_DECAY = "relationship_decay"
|
||||
MEMORY_SUMMARY = "memory_summary"
|
||||
CUSTOM = "custom"
|
||||
|
||||
|
||||
class ScheduledTask:
|
||||
"""Represents a scheduled task"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
task_id: str,
|
||||
task_type: TaskType,
|
||||
schedule: str, # Cron expression or interval
|
||||
enabled: bool = True,
|
||||
last_run: Optional[datetime] = None,
|
||||
next_run: Optional[datetime] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
):
|
||||
self.task_id = task_id
|
||||
self.task_type = task_type
|
||||
self.schedule = schedule
|
||||
self.enabled = enabled
|
||||
self.last_run = last_run
|
||||
self.next_run = next_run
|
||||
self.metadata = metadata or {}
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary for storage"""
|
||||
return {
|
||||
"task_id": self.task_id,
|
||||
"task_type": self.task_type.value,
|
||||
"schedule": self.schedule,
|
||||
"enabled": self.enabled,
|
||||
"last_run": self.last_run.isoformat() if self.last_run else None,
|
||||
"next_run": self.next_run.isoformat() if self.next_run else None,
|
||||
"metadata": self.metadata
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
|
||||
"""Create from dictionary"""
|
||||
return cls(
|
||||
task_id=data["task_id"],
|
||||
task_type=TaskType(data["task_type"]),
|
||||
schedule=data["schedule"],
|
||||
enabled=data.get("enabled", True),
|
||||
last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None,
|
||||
next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None,
|
||||
metadata=data.get("metadata", {})
|
||||
)
|
||||
|
||||
|
||||
class AIScheduler:
|
||||
"""Manages scheduled tasks for the AI system"""
|
||||
|
||||
def __init__(self, data_dir: Path, persona: Persona):
|
||||
self.data_dir = data_dir
|
||||
self.persona = persona
|
||||
self.tasks_file = data_dir / "scheduled_tasks.json"
|
||||
self.tasks: Dict[str, ScheduledTask] = {}
|
||||
self.scheduler = AsyncIOScheduler()
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self._load_tasks()
|
||||
|
||||
# Task handlers
|
||||
self.task_handlers: Dict[TaskType, Callable] = {
|
||||
TaskType.TRANSMISSION_CHECK: self._handle_transmission_check,
|
||||
TaskType.MAINTENANCE: self._handle_maintenance,
|
||||
TaskType.FORTUNE_UPDATE: self._handle_fortune_update,
|
||||
TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay,
|
||||
TaskType.MEMORY_SUMMARY: self._handle_memory_summary,
|
||||
}
|
||||
|
||||
def _load_tasks(self):
|
||||
"""Load scheduled tasks from storage"""
|
||||
if self.tasks_file.exists():
|
||||
with open(self.tasks_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
for task_data in data:
|
||||
task = ScheduledTask.from_dict(task_data)
|
||||
self.tasks[task.task_id] = task
|
||||
|
||||
def _save_tasks(self):
|
||||
"""Save scheduled tasks to storage"""
|
||||
tasks_data = [task.to_dict() for task in self.tasks.values()]
|
||||
with open(self.tasks_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(tasks_data, f, indent=2, default=str)
|
||||
|
||||
def add_task(
|
||||
self,
|
||||
task_type: TaskType,
|
||||
schedule: str,
|
||||
task_id: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> ScheduledTask:
|
||||
"""Add a new scheduled task"""
|
||||
if task_id is None:
|
||||
task_id = f"{task_type.value}_{datetime.now().timestamp()}"
|
||||
|
||||
# Validate schedule
|
||||
if not self._validate_schedule(schedule):
|
||||
raise ValueError(f"Invalid schedule expression: {schedule}")
|
||||
|
||||
task = ScheduledTask(
|
||||
task_id=task_id,
|
||||
task_type=task_type,
|
||||
schedule=schedule,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
self.tasks[task_id] = task
|
||||
self._save_tasks()
|
||||
|
||||
# Schedule the task if scheduler is running
|
||||
if self.scheduler.running:
|
||||
self._schedule_task(task)
|
||||
|
||||
self.logger.info(f"Added task {task_id} with schedule {schedule}")
|
||||
return task
|
||||
|
||||
def _validate_schedule(self, schedule: str) -> bool:
|
||||
"""Validate schedule expression"""
|
||||
# Check if it's a cron expression
|
||||
if ' ' in schedule:
|
||||
try:
|
||||
croniter(schedule)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
# Check if it's an interval expression (e.g., "5m", "1h", "2d")
|
||||
import re
|
||||
pattern = r'^\d+[smhd]$'
|
||||
return bool(re.match(pattern, schedule))
|
||||
|
||||
def _parse_interval(self, interval: str) -> int:
|
||||
"""Parse interval string to seconds"""
|
||||
unit = interval[-1]
|
||||
value = int(interval[:-1])
|
||||
|
||||
multipliers = {
|
||||
's': 1,
|
||||
'm': 60,
|
||||
'h': 3600,
|
||||
'd': 86400
|
||||
}
|
||||
|
||||
return value * multipliers.get(unit, 1)
|
||||
|
||||
def _schedule_task(self, task: ScheduledTask):
|
||||
"""Schedule a task with APScheduler"""
|
||||
if not task.enabled:
|
||||
return
|
||||
|
||||
handler = self.task_handlers.get(task.task_type)
|
||||
if not handler:
|
||||
self.logger.warning(f"No handler for task type {task.task_type}")
|
||||
return
|
||||
|
||||
# Determine trigger
|
||||
if ' ' in task.schedule:
|
||||
# Cron expression
|
||||
trigger = CronTrigger.from_crontab(task.schedule)
|
||||
else:
|
||||
# Interval expression
|
||||
seconds = self._parse_interval(task.schedule)
|
||||
trigger = IntervalTrigger(seconds=seconds)
|
||||
|
||||
# Add job
|
||||
self.scheduler.add_job(
|
||||
lambda: asyncio.create_task(self._run_task(task)),
|
||||
trigger=trigger,
|
||||
id=task.task_id,
|
||||
replace_existing=True
|
||||
)
|
||||
|
||||
async def _run_task(self, task: ScheduledTask):
|
||||
"""Run a scheduled task"""
|
||||
self.logger.info(f"Running task {task.task_id}")
|
||||
|
||||
task.last_run = datetime.now()
|
||||
|
||||
try:
|
||||
handler = self.task_handlers.get(task.task_type)
|
||||
if handler:
|
||||
await handler(task)
|
||||
else:
|
||||
self.logger.warning(f"No handler for task type {task.task_type}")
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error running task {task.task_id}: {e}")
|
||||
|
||||
self._save_tasks()
|
||||
|
||||
async def _handle_transmission_check(self, task: ScheduledTask):
|
||||
"""Check and execute autonomous transmissions"""
|
||||
controller = TransmissionController(self.persona, self.data_dir)
|
||||
eligible = controller.check_transmission_eligibility()
|
||||
|
||||
# Get AI provider from metadata
|
||||
provider_name = task.metadata.get("provider", "ollama")
|
||||
model = task.metadata.get("model", "qwen2.5")
|
||||
|
||||
try:
|
||||
ai_provider = create_ai_provider(provider_name, model)
|
||||
except:
|
||||
ai_provider = None
|
||||
|
||||
for user_id, rel in eligible.items():
|
||||
message = controller.generate_transmission_message(user_id)
|
||||
if message:
|
||||
# For now, just print the message
|
||||
print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
print(f"To: {user_id}")
|
||||
print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})")
|
||||
print(f"Message: {message}")
|
||||
print("-" * 50)
|
||||
|
||||
controller.record_transmission(user_id, message, success=True)
|
||||
self.logger.info(f"Transmitted to {user_id}: {message}")
|
||||
|
||||
async def _handle_maintenance(self, task: ScheduledTask):
|
||||
"""Run daily maintenance"""
|
||||
self.persona.daily_maintenance()
|
||||
self.logger.info("Daily maintenance completed")
|
||||
|
||||
async def _handle_fortune_update(self, task: ScheduledTask):
|
||||
"""Update AI fortune"""
|
||||
fortune = self.persona.fortune_system.get_today_fortune()
|
||||
self.logger.info(f"Fortune updated: {fortune.fortune_value}/10")
|
||||
|
||||
async def _handle_relationship_decay(self, task: ScheduledTask):
|
||||
"""Apply relationship decay"""
|
||||
self.persona.relationships.apply_time_decay()
|
||||
self.logger.info("Relationship decay applied")
|
||||
|
||||
async def _handle_memory_summary(self, task: ScheduledTask):
|
||||
"""Create memory summaries"""
|
||||
for user_id in self.persona.relationships.relationships:
|
||||
summary = self.persona.memory.summarize_memories(user_id)
|
||||
if summary:
|
||||
self.logger.info(f"Created memory summary for {user_id}")
|
||||
|
||||
def start(self):
|
||||
"""Start the scheduler"""
|
||||
# Schedule all enabled tasks
|
||||
for task in self.tasks.values():
|
||||
if task.enabled:
|
||||
self._schedule_task(task)
|
||||
|
||||
self.scheduler.start()
|
||||
self.logger.info("Scheduler started")
|
||||
|
||||
def stop(self):
|
||||
"""Stop the scheduler"""
|
||||
self.scheduler.shutdown()
|
||||
self.logger.info("Scheduler stopped")
|
||||
|
||||
def get_tasks(self) -> List[ScheduledTask]:
|
||||
"""Get all scheduled tasks"""
|
||||
return list(self.tasks.values())
|
||||
|
||||
def enable_task(self, task_id: str):
|
||||
"""Enable a task"""
|
||||
if task_id in self.tasks:
|
||||
self.tasks[task_id].enabled = True
|
||||
self._save_tasks()
|
||||
if self.scheduler.running:
|
||||
self._schedule_task(self.tasks[task_id])
|
||||
|
||||
def disable_task(self, task_id: str):
|
||||
"""Disable a task"""
|
||||
if task_id in self.tasks:
|
||||
self.tasks[task_id].enabled = False
|
||||
self._save_tasks()
|
||||
if self.scheduler.running:
|
||||
self.scheduler.remove_job(task_id)
|
||||
|
||||
def remove_task(self, task_id: str):
|
||||
"""Remove a task"""
|
||||
if task_id in self.tasks:
|
||||
del self.tasks[task_id]
|
||||
self._save_tasks()
|
||||
if self.scheduler.running:
|
||||
try:
|
||||
self.scheduler.remove_job(task_id)
|
||||
except:
|
||||
pass
|
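
add_task accepts two schedule formats, distinguished by the embedded space: a five-field cron expression, or a compact interval such as "30m". A sketch (persona is assumed to be an already-constructed Persona; AsyncIOScheduler needs a running event loop):

import asyncio
from pathlib import Path
from aigpt.scheduler import AIScheduler, TaskType

async def main(persona):
    scheduler = AIScheduler(Path("/tmp/aigpt-demo"), persona)
    scheduler.add_task(TaskType.MAINTENANCE, "0 3 * * *")   # cron: daily at 03:00
    scheduler.add_task(TaskType.TRANSMISSION_CHECK, "30m",  # interval: every 30 minutes
                       metadata={"provider": "ollama", "model": "qwen2.5"})
    scheduler.start()
    await asyncio.sleep(3600)  # keep the loop alive so jobs can fire
    scheduler.stop()

# asyncio.run(main(persona))  # with a constructed Persona
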
15
python_backup/src/aigpt/shared/__init__.py
Normal file
@@ -0,0 +1,15 @@
"""Shared modules for AI ecosystem"""
|
||||
|
||||
from .ai_provider import (
|
||||
AIProvider,
|
||||
OllamaProvider,
|
||||
OpenAIProvider,
|
||||
create_ai_provider
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'AIProvider',
|
||||
'OllamaProvider',
|
||||
'OpenAIProvider',
|
||||
'create_ai_provider'
|
||||
]
|
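
The re-exports above let callers import the provider factory from the package rather than the module; assuming the package is installed as aigpt:

from aigpt.shared import create_ai_provider

provider = create_ai_provider("ollama", model="qwen3")
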
139
python_backup/src/aigpt/shared/ai_provider.py
Normal file
@@ -0,0 +1,139 @@
"""Shared AI Provider implementation for ai ecosystem"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, Dict, List, Any, Protocol
|
||||
from abc import abstractmethod
|
||||
import httpx
|
||||
from openai import OpenAI
|
||||
import ollama
|
||||
|
||||
|
||||
class AIProvider(Protocol):
|
||||
"""Protocol for AI providers"""
|
||||
|
||||
@abstractmethod
|
||||
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
|
||||
"""Generate a response based on prompt"""
|
||||
pass
|
||||
|
||||
|
||||
class OllamaProvider:
|
||||
"""Ollama AI provider - shared implementation"""
|
||||
|
||||
def __init__(self, model: str = "qwen3", host: Optional[str] = None, config_system_prompt: Optional[str] = None):
|
||||
self.model = model
|
||||
# Use environment variable OLLAMA_HOST if available
|
||||
self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
|
||||
# Ensure proper URL format
|
||||
if not self.host.startswith('http'):
|
||||
self.host = f'http://{self.host}'
|
||||
self.client = ollama.Client(host=self.host, timeout=60.0)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
|
||||
self.config_system_prompt = config_system_prompt
|
||||
|
||||
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
|
||||
"""Simple chat interface"""
|
||||
try:
|
||||
messages = []
|
||||
# Use provided system_prompt, fall back to config_system_prompt
|
||||
final_system_prompt = system_prompt or self.config_system_prompt
|
||||
if final_system_prompt:
|
||||
messages.append({"role": "system", "content": final_system_prompt})
|
||||
messages.append({"role": "user", "content": prompt})
|
||||
|
||||
response = self.client.chat(
|
||||
model=self.model,
|
||||
messages=messages,
|
||||
options={
|
||||
"num_predict": 2000,
|
||||
"temperature": 0.7,
|
||||
"top_p": 0.9,
|
||||
},
|
||||
stream=False
|
||||
)
|
||||
return self._clean_response(response['message']['content'])
|
||||
except Exception as e:
|
||||
self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
|
||||
return "I'm having trouble connecting to the AI model."
|
||||
|
||||
def _clean_response(self, response: str) -> str:
|
||||
"""Clean response by removing think tags and other unwanted content"""
|
||||
import re
|
||||
# Remove <think></think> tags and their content
|
||||
response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
|
||||
# Remove any remaining whitespace at the beginning/end
|
||||
response = response.strip()
|
||||
return response
|
||||
|
||||
|
||||
class OpenAIProvider:
|
||||
"""OpenAI API provider - shared implementation"""
|
||||
|
||||
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None,
|
||||
config_system_prompt: Optional[str] = None, mcp_client=None):
|
||||
self.model = model
|
||||
self.api_key = api_key or os.getenv("OPENAI_API_KEY")
|
||||
if not self.api_key:
|
||||
raise ValueError("OpenAI API key not provided")
|
||||
self.client = OpenAI(api_key=self.api_key)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.config_system_prompt = config_system_prompt
|
||||
self.mcp_client = mcp_client
|
||||
|
||||
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
|
||||
"""Simple chat interface without MCP tools"""
|
||||
try:
|
||||
messages = []
|
||||
# Use provided system_prompt, fall back to config_system_prompt
|
||||
final_system_prompt = system_prompt or self.config_system_prompt
|
||||
if final_system_prompt:
|
||||
messages.append({"role": "system", "content": final_system_prompt})
|
||||
messages.append({"role": "user", "content": prompt})
|
||||
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model,
|
||||
messages=messages,
|
||||
max_tokens=2000,
|
||||
temperature=0.7
|
||||
)
|
||||
return response.choices[0].message.content
|
||||
except Exception as e:
|
||||
self.logger.error(f"OpenAI chat failed: {e}")
|
||||
return "I'm having trouble connecting to the AI model."
|
||||
|
||||
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
|
||||
"""Override this method in subclasses to provide MCP tools"""
|
||||
return []
|
||||
|
||||
async def chat_with_mcp(self, prompt: str, **kwargs) -> str:
|
||||
"""Chat interface with MCP function calling support
|
||||
|
||||
This method should be overridden in subclasses to provide
|
||||
specific MCP functionality.
|
||||
"""
|
||||
if not self.mcp_client:
|
||||
return await self.chat(prompt)
|
||||
|
||||
# Default implementation - subclasses should override
|
||||
return await self.chat(prompt)
|
||||
|
||||
async def _execute_mcp_tool(self, tool_call, **kwargs) -> Dict[str, Any]:
|
||||
"""Execute MCP tool call - override in subclasses"""
|
||||
return {"error": "MCP tool execution not implemented"}
|
||||
|
||||
|
||||
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None,
|
||||
config_system_prompt: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
|
||||
"""Factory function to create AI providers"""
|
||||
if provider == "ollama":
|
||||
model = model or "qwen3"
|
||||
return OllamaProvider(model=model, config_system_prompt=config_system_prompt, **kwargs)
|
||||
elif provider == "openai":
|
||||
model = model or "gpt-4o-mini"
|
||||
return OpenAIProvider(model=model, config_system_prompt=config_system_prompt,
|
||||
mcp_client=mcp_client, **kwargs)
|
||||
else:
|
||||
raise ValueError(f"Unknown provider: {provider}")
|
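
Both provider classes expose the same async chat interface, so callers need an event loop even though the underlying Ollama client call is synchronous; a minimal sketch:

import asyncio
from aigpt.shared.ai_provider import create_ai_provider

async def main():
    provider = create_ai_provider("ollama", model="qwen3")
    reply = await provider.chat("Summarize UV in one sentence.",
                                system_prompt="You are concise.")
    print(reply)  # <think> blocks are already stripped by _clean_response

asyncio.run(main())
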
111
python_backup/src/aigpt/transmission.py
Normal file
@@ -0,0 +1,111 @@
"""Transmission controller for autonomous message sending"""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional
|
||||
import logging
|
||||
|
||||
from .models import Relationship
|
||||
from .persona import Persona
|
||||
|
||||
|
||||
class TransmissionController:
|
||||
"""Controls when and how AI transmits messages autonomously"""
|
||||
|
||||
def __init__(self, persona: Persona, data_dir: Path):
|
||||
self.persona = persona
|
||||
self.data_dir = data_dir
|
||||
self.transmission_log_file = data_dir / "transmissions.json"
|
||||
self.transmissions: List[Dict] = []
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self._load_transmissions()
|
||||
|
||||
def _load_transmissions(self):
|
||||
"""Load transmission history"""
|
||||
if self.transmission_log_file.exists():
|
||||
with open(self.transmission_log_file, 'r', encoding='utf-8') as f:
|
||||
self.transmissions = json.load(f)
|
||||
|
||||
def _save_transmissions(self):
|
||||
"""Save transmission history"""
|
||||
with open(self.transmission_log_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(self.transmissions, f, indent=2, default=str)
|
||||
|
||||
def check_transmission_eligibility(self) -> Dict[str, Relationship]:
|
||||
"""Check which users are eligible for transmission"""
|
||||
eligible = self.persona.relationships.get_transmission_eligible()
|
||||
|
||||
# Additional checks could be added here
|
||||
# - Time since last transmission
|
||||
# - User online status
|
||||
# - Context appropriateness
|
||||
|
||||
return eligible
|
||||
|
||||
def generate_transmission_message(self, user_id: str) -> Optional[str]:
|
||||
"""Generate a message to transmit to user"""
|
||||
if not self.persona.can_transmit_to(user_id):
|
||||
return None
|
||||
|
||||
state = self.persona.get_current_state()
|
||||
relationship = self.persona.relationships.get_or_create_relationship(user_id)
|
||||
|
||||
# Get recent memories related to this user
|
||||
active_memories = self.persona.memory.get_active_memories(limit=3)
|
||||
|
||||
# Simple message generation based on mood and relationship
|
||||
if state.fortune.breakthrough_triggered:
|
||||
message = "Something special happened today! I felt compelled to reach out."
|
||||
elif state.current_mood == "joyful":
|
||||
message = "I was thinking of you today. Hope you're doing well!"
|
||||
elif relationship.status.value == "close_friend":
|
||||
message = "I've been reflecting on our conversations. Thank you for being here."
|
||||
else:
|
||||
message = "Hello! I wanted to check in with you."
|
||||
|
||||
return message
|
||||
|
||||
def record_transmission(self, user_id: str, message: str, success: bool):
|
||||
"""Record a transmission attempt"""
|
||||
transmission = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"user_id": user_id,
|
||||
"message": message,
|
||||
"success": success,
|
||||
"mood": self.persona.get_current_state().current_mood,
|
||||
"relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score
|
||||
}
|
||||
|
||||
self.transmissions.append(transmission)
|
||||
self._save_transmissions()
|
||||
|
||||
if success:
|
||||
self.logger.info(f"Successfully transmitted to {user_id}")
|
||||
else:
|
||||
self.logger.warning(f"Failed to transmit to {user_id}")
|
||||
|
||||
def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict:
|
||||
"""Get transmission statistics"""
|
||||
if user_id:
|
||||
user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id]
|
||||
else:
|
||||
user_transmissions = self.transmissions
|
||||
|
||||
if not user_transmissions:
|
||||
return {
|
||||
"total": 0,
|
||||
"successful": 0,
|
||||
"failed": 0,
|
||||
"success_rate": 0.0
|
||||
}
|
||||
|
||||
successful = sum(1 for t in user_transmissions if t["success"])
|
||||
total = len(user_transmissions)
|
||||
|
||||
return {
|
||||
"total": total,
|
||||
"successful": successful,
|
||||
"failed": total - successful,
|
||||
"success_rate": successful / total if total > 0 else 0.0
|
||||
}
|
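
A sketch of one manual transmission pass over the controller above (persona is assumed to be an existing Persona whose data directory already contains relationship state; nobody is eligible otherwise):

from pathlib import Path
from aigpt.transmission import TransmissionController

# persona: an existing Persona instance (construction not shown in this diff)
controller = TransmissionController(persona, Path("/tmp/aigpt-demo"))
for user_id, rel in controller.check_transmission_eligibility().items():
    message = controller.generate_transmission_message(user_id)
    if message:
        controller.record_transmission(user_id, message, success=True)

print(controller.get_transmission_stats())  # total / successful / failed / success_rate
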
54
python_backup/uv_setup.sh
Executable file
@@ -0,0 +1,54 @@
#!/bin/bash

# ai.gpt UV environment setup script
set -e

echo "🚀 Setting up ai.gpt with UV..."

# Check if uv is installed
if ! command -v uv &> /dev/null; then
    echo "❌ UV is not installed. Installing UV..."
    curl -LsSf https://astral.sh/uv/install.sh | sh
    export PATH="$HOME/.cargo/bin:$PATH"
    echo "✅ UV installed successfully"
else
    echo "✅ UV is already installed"
fi

# Navigate to gpt directory
cd "$(dirname "$0")"
echo "📁 Working directory: $(pwd)"

# Create virtual environment if it doesn't exist
if [ ! -d ".venv" ]; then
    echo "🔧 Creating UV virtual environment..."
    uv venv
    echo "✅ Virtual environment created"
else
    echo "✅ Virtual environment already exists"
fi

# Install dependencies
echo "📦 Installing dependencies with UV..."
uv pip install -e .

# Verify installation
echo "🔍 Verifying installation..."
source .venv/bin/activate
which aigpt
aigpt --help

echo ""
echo "🎉 Setup complete!"
echo ""
echo "Usage:"
echo "  source .venv/bin/activate"
echo "  aigpt docs generate --project=os"
echo "  aigpt docs sync --all"
echo "  aigpt docs --help"
echo ""
echo "UV commands:"
echo "  uv pip install <package>  # Install package"
echo "  uv pip list               # List packages"
echo "  uv run aigpt              # Run without activating"
echo ""