2025-06-08 06:41:41 +09:00
parent 582b983a32
commit ed6d6e0d47
68 changed files with 1998 additions and 1036 deletions

src/ai_provider.rs (new file, 246 lines)

@@ -0,0 +1,246 @@
use anyhow::{Result, anyhow};
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AIProvider {
    OpenAI,
    Ollama,
    Claude,
}

impl std::fmt::Display for AIProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AIProvider::OpenAI => write!(f, "openai"),
            AIProvider::Ollama => write!(f, "ollama"),
            AIProvider::Claude => write!(f, "claude"),
        }
    }
}

impl std::str::FromStr for AIProvider {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        match s.to_lowercase().as_str() {
            "openai" | "gpt" => Ok(AIProvider::OpenAI),
            "ollama" => Ok(AIProvider::Ollama),
            "claude" => Ok(AIProvider::Claude),
            _ => Err(anyhow!("Unknown AI provider: {}", s)),
        }
    }
}
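// Note: the FromStr impl above lowercases its input, so parsing is
// case-insensitive; e.g. "GPT".parse::<AIProvider>() yields AIProvider::OpenAI.
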
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIConfig {
    pub provider: AIProvider,
    pub model: String,
    pub api_key: Option<String>,
    pub base_url: Option<String>,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
}

impl Default for AIConfig {
    fn default() -> Self {
        AIConfig {
            provider: AIProvider::Ollama,
            model: "llama2".to_string(),
            api_key: None,
            base_url: Some("http://localhost:11434".to_string()),
            max_tokens: Some(2048),
            temperature: Some(0.7),
        }
    }
}

#[derive(Debug, Clone)]
pub struct ChatMessage {
    pub role: String,
    pub content: String,
}

#[derive(Debug, Clone)]
pub struct ChatResponse {
    pub content: String,
    pub tokens_used: Option<u32>,
    pub model: String,
}

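/// Client that routes chat requests to the configured provider over HTTP.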
pub struct AIProviderClient {
    config: AIConfig,
    http_client: reqwest::Client,
}

impl AIProviderClient {
    pub fn new(config: AIConfig) -> Self {
        let http_client = reqwest::Client::new();
        AIProviderClient {
            config,
            http_client,
        }
    }

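    /// Dispatch the conversation to the backend selected by `config.provider`.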
    pub async fn chat(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        match self.config.provider {
            AIProvider::OpenAI => self.chat_openai(messages, system_prompt).await,
            AIProvider::Ollama => self.chat_ollama(messages, system_prompt).await,
            AIProvider::Claude => self.chat_claude(messages, system_prompt).await,
        }
    }

    async fn chat_openai(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        let api_key = self.config.api_key.as_ref()
            .ok_or_else(|| anyhow!("OpenAI API key required"))?;

        let mut request_messages = Vec::new();

        // Add system prompt if provided
        if let Some(system) = system_prompt {
            request_messages.push(serde_json::json!({
                "role": "system",
                "content": system
            }));
        }

        // Add conversation messages
        for msg in messages {
            request_messages.push(serde_json::json!({
                "role": msg.role,
                "content": msg.content
            }));
        }

        let request_body = serde_json::json!({
            "model": self.config.model,
            "messages": request_messages,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature
        });

        let response = self.http_client
            .post("https://api.openai.com/v1/chat/completions")
            .header("Authorization", format!("Bearer {}", api_key))
            .header("Content-Type", "application/json")
            .json(&request_body)
            .send()
            .await?;

        if !response.status().is_success() {
            let error_text = response.text().await?;
            return Err(anyhow!("OpenAI API error: {}", error_text));
        }

        let response_json: serde_json::Value = response.json().await?;
        let content = response_json["choices"][0]["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow!("Invalid OpenAI response format"))?
            .to_string();
        let tokens_used = response_json["usage"]["total_tokens"]
            .as_u64()
            .map(|t| t as u32);

        Ok(ChatResponse {
            content,
            tokens_used,
            model: self.config.model.clone(),
        })
    }

    async fn chat_ollama(&self, messages: Vec<ChatMessage>, system_prompt: Option<String>) -> Result<ChatResponse> {
        let default_url = "http://localhost:11434".to_string();
        let base_url = self.config.base_url.as_ref()
            .unwrap_or(&default_url);

        let mut request_messages = Vec::new();

        // Add system prompt if provided
        if let Some(system) = system_prompt {
            request_messages.push(serde_json::json!({
                "role": "system",
                "content": system
            }));
        }

        // Add conversation messages
        for msg in messages {
            request_messages.push(serde_json::json!({
                "role": msg.role,
                "content": msg.content
            }));
        }

        let request_body = serde_json::json!({
            "model": self.config.model,
            "messages": request_messages,
            "stream": false
        });
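
        // With "stream": false, Ollama's /api/chat returns a single JSON object
        // whose message.content field holds the complete reply.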
        let url = format!("{}/api/chat", base_url);
        let response = self.http_client
            .post(&url)
            .header("Content-Type", "application/json")
            .json(&request_body)
            .send()
            .await?;

        if !response.status().is_success() {
            let error_text = response.text().await?;
            return Err(anyhow!("Ollama API error: {}", error_text));
        }

        let response_json: serde_json::Value = response.json().await?;
        let content = response_json["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow!("Invalid Ollama response format"))?
            .to_string();

        Ok(ChatResponse {
            content,
            tokens_used: None, // Ollama doesn't typically return token counts
            model: self.config.model.clone(),
        })
    }

    async fn chat_claude(&self, _messages: Vec<ChatMessage>, _system_prompt: Option<String>) -> Result<ChatResponse> {
        // Claude API implementation would go here
        // For now, return a placeholder
        Err(anyhow!("Claude provider not yet implemented"))
    }

    pub fn get_model(&self) -> &str {
        &self.config.model
    }

    pub fn get_provider(&self) -> &AIProvider {
        &self.config.provider
    }
}

// Convenience functions for creating common message types
impl ChatMessage {
    pub fn user(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "user".to_string(),
            content: content.into(),
        }
    }

    pub fn assistant(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "assistant".to_string(),
            content: content.into(),
        }
    }

    pub fn system(content: impl Into<String>) -> Self {
        ChatMessage {
            role: "system".to_string(),
            content: content.into(),
        }
    }
}
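
A minimal caller sketch for the new module (hypothetical code, not part of this commit; assumes an async runtime such as Tokio and, with the default config, a local Ollama server):

async fn example() -> anyhow::Result<()> {
    // Default config targets Ollama at http://localhost:11434 with model "llama2".
    let client = AIProviderClient::new(AIConfig::default());
    let messages = vec![ChatMessage::user("Hello!")];
    let reply = client.chat(messages, Some("Answer briefly.".to_string())).await?;
    println!("{} (model: {})", reply.content, reply.model);
    Ok(())
}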

src/aigpt.egg-info/PKG-INFO (deleted)

@@ -1,21 +0,0 @@
Metadata-Version: 2.4
Name: aigpt
Version: 0.1.0
Summary: Autonomous transmission AI with unique personality based on relationship parameters
Requires-Python: >=3.10
Requires-Dist: click>=8.0.0
Requires-Dist: typer>=0.9.0
Requires-Dist: fastapi-mcp>=0.1.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: httpx>=0.24.0
Requires-Dist: rich>=13.0.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: ollama>=0.1.0
Requires-Dist: openai>=1.0.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: apscheduler>=3.10.0
Requires-Dist: croniter>=1.3.0
Requires-Dist: prompt-toolkit>=3.0.0
Requires-Dist: jinja2>=3.0.0
Requires-Dist: gitpython>=3.1.0
Requires-Dist: pathlib-extensions>=0.1.0

src/aigpt.egg-info/SOURCES.txt (deleted)

@@ -1,34 +0,0 @@
README.md
pyproject.toml
src/aigpt/__init__.py
src/aigpt/ai_provider.py
src/aigpt/chatgpt_importer.py
src/aigpt/cli.py
src/aigpt/config.py
src/aigpt/fortune.py
src/aigpt/mcp_server.py
src/aigpt/mcp_server_simple.py
src/aigpt/memory.py
src/aigpt/models.py
src/aigpt/persona.py
src/aigpt/project_manager.py
src/aigpt/relationship.py
src/aigpt/scheduler.py
src/aigpt/transmission.py
src/aigpt.egg-info/PKG-INFO
src/aigpt.egg-info/SOURCES.txt
src/aigpt.egg-info/dependency_links.txt
src/aigpt.egg-info/entry_points.txt
src/aigpt.egg-info/requires.txt
src/aigpt.egg-info/top_level.txt
src/aigpt/commands/docs.py
src/aigpt/commands/submodules.py
src/aigpt/commands/tokens.py
src/aigpt/docs/__init__.py
src/aigpt/docs/config.py
src/aigpt/docs/git_utils.py
src/aigpt/docs/templates.py
src/aigpt/docs/utils.py
src/aigpt/docs/wiki_generator.py
src/aigpt/shared/__init__.py
src/aigpt/shared/ai_provider.py

src/aigpt.egg-info/dependency_links.txt (deleted)

@@ -1 +0,0 @@

src/aigpt.egg-info/entry_points.txt (deleted)

@@ -1,2 +0,0 @@
[console_scripts]
aigpt = aigpt.cli:app

src/aigpt.egg-info/requires.txt (deleted)

@@ -1,16 +0,0 @@
click>=8.0.0
typer>=0.9.0
fastapi-mcp>=0.1.0
pydantic>=2.0.0
httpx>=0.24.0
rich>=13.0.0
python-dotenv>=1.0.0
ollama>=0.1.0
openai>=1.0.0
uvicorn>=0.23.0
apscheduler>=3.10.0
croniter>=1.3.0
prompt-toolkit>=3.0.0
jinja2>=3.0.0
gitpython>=3.1.0
pathlib-extensions>=0.1.0

src/aigpt.egg-info/top_level.txt (deleted)

@@ -1 +0,0 @@
aigpt

src/aigpt/__init__.py (deleted)

@@ -1,15 +0,0 @@
"""ai.gpt - Autonomous transmission AI with unique personality"""
__version__ = "0.1.0"
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .persona import Persona
from .transmission import TransmissionController
__all__ = [
"MemoryManager",
"RelationshipTracker",
"Persona",
"TransmissionController",
]

src/aigpt/ai_provider.py (deleted)

@@ -1,580 +0,0 @@
"""AI Provider integration for response generation"""
import os
import json
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import logging
import httpx
from openai import OpenAI
import ollama
from .models import PersonaState, Memory
from .config import Config
class AIProvider(Protocol):
"""Protocol for AI providers"""
@abstractmethod
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate a response based on prompt and context"""
pass
class OllamaProvider:
"""Ollama AI provider"""
def __init__(self, model: str = "qwen2.5", host: Optional[str] = None):
self.model = model
# Use environment variable OLLAMA_HOST if available, otherwise use config or default
self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
# Ensure proper URL format
if not self.host.startswith('http'):
self.host = f'http://{self.host}'
self.client = ollama.Client(host=self.host, timeout=60.0) # 60-second timeout
self.logger = logging.getLogger(__name__)
self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
# Load system prompt from config
try:
config = Config()
self.config_system_prompt = config.get('providers.ollama.system_prompt')
except Exception:
self.config_system_prompt = None
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using Ollama"""
# Build context from memories
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
# Build personality context
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
# System prompt with persona context
full_system_prompt = f"""You are an AI with the following characteristics:
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories.'}"""
try:
response = self.client.chat(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
]
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama generation failed: {e}")
return self._fallback_response(persona_state)
def chat(self, prompt: str, max_tokens: int = 2000) -> str:
"""Simple chat interface"""
try:
messages = []
if self.config_system_prompt:
messages.append({"role": "system", "content": self.config_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat(
model=self.model,
messages=messages,
options={
"num_predict": max_tokens,
"temperature": 0.7,
"top_p": 0.9,
},
stream=False # disable streaming for better stability
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
return "I'm having trouble connecting to the AI model."
def _clean_response(self, response: str) -> str:
"""Clean response by removing think tags and other unwanted content"""
import re
# Remove <think></think> tags and their content
response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
# Remove any remaining whitespace at the beginning/end
response = response.strip()
return response
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "That's wonderful! I'm feeling great today!",
"cheerful": "That sounds nice!",
"neutral": "I understand.",
"melancholic": "I see... That's something to think about.",
"contemplative": "Hmm, let me consider that..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
class OpenAIProvider:
"""OpenAI API provider with MCP function calling support"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None, mcp_client=None):
self.model = model
# Try to get API key from config first
config = Config()
self.api_key = api_key or config.get_api_key("openai") or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key not provided. Set it with: aigpt config set providers.openai.api_key YOUR_KEY")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
self.mcp_client = mcp_client # For MCP function calling
# Load system prompt from config
try:
self.config_system_prompt = config.get('providers.openai.system_prompt')
except Exception:
self.config_system_prompt = None
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
"""Generate OpenAI tools from MCP endpoints"""
if not self.mcp_client or not self.mcp_client.available:
return []
tools = [
{
"type": "function",
"function": {
"name": "get_memories",
"description": "過去の会話記憶を取得します。「覚えている」「前回」「以前」などの質問で必ず使用してください",
"parameters": {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
}
}
}
},
{
"type": "function",
"function": {
"name": "search_memories",
"description": "特定のトピックについて話した記憶を検索します。「プログラミングについて」「○○について話した」などの質問で使用してください",
"parameters": {
"type": "object",
"properties": {
"keywords": {
"type": "array",
"items": {"type": "string"},
"description": "検索キーワードの配列"
}
},
"required": ["keywords"]
}
}
},
{
"type": "function",
"function": {
"name": "get_contextual_memories",
"description": "クエリに関連する文脈的記憶を取得します",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索クエリ"
},
"limit": {
"type": "integer",
"description": "取得する記憶の数",
"default": 5
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "get_relationship",
"description": "特定ユーザーとの関係性情報を取得します",
"parameters": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"description": "ユーザーID"
}
},
"required": ["user_id"]
}
}
}
]
# Add ai.card tools if available
if hasattr(self.mcp_client, 'has_card_tools') and self.mcp_client.has_card_tools:
card_tools = [
{
"type": "function",
"function": {
"name": "card_get_user_cards",
"description": "ユーザーが所有するカードの一覧を取得します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"limit": {
"type": "integer",
"description": "取得するカード数の上限",
"default": 10
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_draw_card",
"description": "ガチャを引いてカードを取得します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
},
"is_paid": {
"type": "boolean",
"description": "有料ガチャかどうか",
"default": False
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_analyze_collection",
"description": "ユーザーのカードコレクションを分析します",
"parameters": {
"type": "object",
"properties": {
"did": {
"type": "string",
"description": "ユーザーのDID"
}
},
"required": ["did"]
}
}
},
{
"type": "function",
"function": {
"name": "card_get_gacha_stats",
"description": "ガチャの統計情報を取得します",
"parameters": {
"type": "object",
"properties": {}
}
}
}
]
tools.extend(card_tools)
return tools
async def generate_response(
self,
prompt: str,
persona_state: PersonaState,
memories: List[Memory],
system_prompt: Optional[str] = None
) -> str:
"""Generate response using OpenAI"""
# Build context similar to Ollama
memory_context = "\n".join([
f"[{mem.level.value}] {mem.content[:200]}..."
for mem in memories[:5]
])
personality_desc = ", ".join([
f"{trait}: {value:.1f}"
for trait, value in persona_state.base_personality.items()
])
full_system_prompt = f"""You are an AI with unique personality traits and memories.
Current mood: {persona_state.current_mood}
Fortune today: {persona_state.fortune.fortune_value}/10
Personality traits: {personality_desc}
Recent memories:
{memory_context}
{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""
try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": full_system_prompt},
{"role": "user", "content": prompt}
],
temperature=0.7 + (persona_state.fortune.fortune_value - 5) * 0.05 # Vary by fortune
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI generation failed: {e}")
return self._fallback_response(persona_state)
async def chat_with_mcp(self, prompt: str, max_tokens: int = 2000, user_id: str = "user") -> str:
"""Chat interface with MCP function calling support"""
if not self.mcp_client or not self.mcp_client.available:
return self.chat(prompt, max_tokens)
try:
# Prepare tools
tools = self._get_mcp_tools()
# Initial request with tools
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": self.config_system_prompt or "あなたは記憶システムと関係性データ、カードゲームシステムにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。カード関連の質問「カード」「コレクション」「ガチャ」「見せて」「持っている」などでは、必ずcard_get_user_cardsやcard_analyze_collectionなどのツールを使用してください。didパラメータには現在会話しているユーザーのID'syui')を使用してください。"},
{"role": "user", "content": prompt}
],
tools=tools,
tool_choice="auto",
max_tokens=max_tokens,
temperature=0.7
)
message = response.choices[0].message
# Handle tool calls
if message.tool_calls:
print(f"🔧 [OpenAI] {len(message.tool_calls)} tools called:")
for tc in message.tool_calls:
print(f" - {tc.function.name}({tc.function.arguments})")
messages = [
{"role": "system", "content": self.config_system_prompt or "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"},
{"role": "user", "content": prompt},
{
"role": "assistant",
"content": message.content,
"tool_calls": [tc.model_dump() for tc in message.tool_calls]
}
]
# Execute each tool call
for tool_call in message.tool_calls:
print(f"🌐 [MCP] Executing {tool_call.function.name}...")
tool_result = await self._execute_mcp_tool(tool_call, user_id)
print(f"✅ [MCP] Result: {str(tool_result)[:100]}...")
messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
"name": tool_call.function.name,
"content": json.dumps(tool_result, ensure_ascii=False)
})
# Get final response with tool outputs
final_response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=0.7
)
return final_response.choices[0].message.content
else:
return message.content
except Exception as e:
self.logger.error(f"OpenAI MCP chat failed: {e}")
return f"申し訳ありません。エラーが発生しました: {e}"
async def _execute_mcp_tool(self, tool_call, context_user_id: str = "user") -> Dict[str, Any]:
"""Execute MCP tool call"""
try:
import json
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
if function_name == "get_memories":
limit = arguments.get("limit", 5)
return await self.mcp_client.get_memories(limit) or {"error": "記憶の取得に失敗しました"}
elif function_name == "search_memories":
keywords = arguments.get("keywords", [])
return await self.mcp_client.search_memories(keywords) or {"error": "記憶の検索に失敗しました"}
elif function_name == "get_contextual_memories":
query = arguments.get("query", "")
limit = arguments.get("limit", 5)
return await self.mcp_client.get_contextual_memories(query, limit) or {"error": "文脈記憶の取得に失敗しました"}
elif function_name == "get_relationship":
# Fall back to the context user_id when the argument is missing
user_id = arguments.get("user_id", context_user_id)
if not user_id or user_id == "user":
user_id = context_user_id
# Debug logging
print(f"🔍 [DEBUG] get_relationship called with user_id: '{user_id}' (context: '{context_user_id}')")
result = await self.mcp_client.get_relationship(user_id)
print(f"🔍 [DEBUG] MCP result: {result}")
return result or {"error": "関係性の取得に失敗しました"}
# ai.card tools
elif function_name == "card_get_user_cards":
did = arguments.get("did", context_user_id)
limit = arguments.get("limit", 10)
result = await self.mcp_client.card_get_user_cards(did, limit)
# Check if ai.card server is not running
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "カード一覧の取得に失敗しました"}
elif function_name == "card_draw_card":
did = arguments.get("did", context_user_id)
is_paid = arguments.get("is_paid", False)
result = await self.mcp_client.card_draw_card(did, is_paid)
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "ガチャに失敗しました"}
elif function_name == "card_analyze_collection":
did = arguments.get("did", context_user_id)
result = await self.mcp_client.card_analyze_collection(did)
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "コレクション分析に失敗しました"}
elif function_name == "card_get_gacha_stats":
result = await self.mcp_client.card_get_gacha_stats()
if result and result.get("error") == "ai.card server is not running":
return {
"error": "ai.cardサーバーが起動していません",
"message": "カードシステムを使用するには、別のターミナルで以下のコマンドを実行してください:\ncd card && ./start_server.sh"
}
return result or {"error": "ガチャ統計の取得に失敗しました"}
else:
return {"error": f"未知のツール: {function_name}"}
except Exception as e:
return {"error": f"ツール実行エラー: {str(e)}"}
def chat(self, prompt: str, max_tokens: int = 2000) -> str:
"""Simple chat interface without MCP tools"""
try:
messages = []
if self.config_system_prompt:
messages.append({"role": "system", "content": self.config_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=0.7
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI chat failed: {e}")
return "I'm having trouble connecting to the AI model."
def _fallback_response(self, persona_state: PersonaState) -> str:
"""Fallback response based on mood"""
mood_responses = {
"joyful": "What a delightful conversation!",
"cheerful": "That's interesting!",
"neutral": "I understand what you mean.",
"melancholic": "I've been thinking about that too...",
"contemplative": "That gives me something to ponder..."
}
return mood_responses.get(persona_state.current_mood, "I see.")
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
# Get model from config if not provided
if model is None:
try:
from .config import Config
config = Config()
model = config.get('providers.ollama.default_model', 'qwen2.5')
except Exception:
model = 'qwen2.5' # Fallback to default
# Try to get host from config if not provided in kwargs
if 'host' not in kwargs:
try:
from .config import Config
config = Config()
config_host = config.get('providers.ollama.host')
if config_host:
kwargs['host'] = config_host
except Exception:
pass # Use environment variable or default
return OllamaProvider(model=model, **kwargs)
elif provider == "openai":
# Get model from config if not provided
if model is None:
try:
from .config import Config
config = Config()
model = config.get('providers.openai.default_model', 'gpt-4o-mini')
except Exception:
model = 'gpt-4o-mini' # Fallback to default
return OpenAIProvider(model=model, mcp_client=mcp_client, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")
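# Typical usage of this factory (hypothetical caller):
#   provider = create_ai_provider("openai", model="gpt-4o-mini")
#   reply = provider.chat("Hello")
# When model/host are omitted, they fall back to config keys such as
# providers.ollama.default_model and providers.ollama.host.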

src/aigpt/chatgpt_importer.py (deleted)

@@ -1,192 +0,0 @@
"""ChatGPT conversation data importer for ai.gpt"""
import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
from .models import Memory, MemoryLevel, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker
logger = logging.getLogger(__name__)
class ChatGPTImporter:
"""Import ChatGPT conversation data into ai.gpt memory system"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.memory_manager = MemoryManager(data_dir)
self.relationship_tracker = RelationshipTracker(data_dir)
def import_from_file(self, file_path: Path, user_id: str = "chatgpt_user") -> Dict[str, Any]:
"""Import ChatGPT conversations from JSON file
Args:
file_path: Path to ChatGPT export JSON file
user_id: User ID to associate with imported conversations
Returns:
Dict with import statistics
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
chatgpt_data = json.load(f)
return self._import_conversations(chatgpt_data, user_id)
except Exception as e:
logger.error(f"Failed to import ChatGPT data: {e}")
raise
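# Shape of one ChatGPT export entry, as consumed by the methods below
# (abridged sketch; only the keys this importer reads are shown):
# {
#   "title": "...",
#   "create_time": 1700000000.0,
#   "mapping": {
#     "<node_id>": {
#       "message": {
#         "author": {"role": "user"},
#         "content": {"content_type": "text", "parts": ["..."]},
#         "create_time": 1700000000.0,
#         "metadata": {"is_visually_hidden_from_conversation": false}
#       }
#     }
#   }
# }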
def _import_conversations(self, chatgpt_data: List[Dict], user_id: str) -> Dict[str, Any]:
"""Import multiple conversations from ChatGPT data"""
stats = {
"conversations_imported": 0,
"messages_imported": 0,
"user_messages": 0,
"assistant_messages": 0,
"skipped_messages": 0
}
for conversation_data in chatgpt_data:
try:
conv_stats = self._import_single_conversation(conversation_data, user_id)
# Update overall stats
stats["conversations_imported"] += 1
stats["messages_imported"] += conv_stats["messages"]
stats["user_messages"] += conv_stats["user_messages"]
stats["assistant_messages"] += conv_stats["assistant_messages"]
stats["skipped_messages"] += conv_stats["skipped"]
except Exception as e:
logger.warning(f"Failed to import conversation '{conversation_data.get('title', 'Unknown')}': {e}")
continue
logger.info(f"Import completed: {stats}")
return stats
def _import_single_conversation(self, conversation_data: Dict, user_id: str) -> Dict[str, int]:
"""Import a single conversation from ChatGPT"""
title = conversation_data.get("title", "Untitled")
create_time = conversation_data.get("create_time")
mapping = conversation_data.get("mapping", {})
stats = {"messages": 0, "user_messages": 0, "assistant_messages": 0, "skipped": 0}
# Extract messages in chronological order
messages = self._extract_messages_from_mapping(mapping)
for msg in messages:
try:
role = msg["author"]["role"]
content = self._extract_content(msg["content"])
create_time_msg = msg.get("create_time")
if not content or role not in ["user", "assistant"]:
stats["skipped"] += 1
continue
# Convert to ai.gpt format
if role == "user":
# User message - create memory entry
self._add_user_message(user_id, content, create_time_msg, title)
stats["user_messages"] += 1
elif role == "assistant":
# Assistant message - create AI response memory
self._add_assistant_message(user_id, content, create_time_msg, title)
stats["assistant_messages"] += 1
stats["messages"] += 1
except Exception as e:
logger.warning(f"Failed to process message in '{title}': {e}")
stats["skipped"] += 1
continue
logger.info(f"Imported conversation '{title}': {stats}")
return stats
def _extract_messages_from_mapping(self, mapping: Dict) -> List[Dict]:
"""Extract messages from ChatGPT mapping structure in chronological order"""
messages = []
for node_id, node_data in mapping.items():
message = node_data.get("message")
if message and message.get("author", {}).get("role") in ["user", "assistant"]:
# Skip system messages and hidden messages
metadata = message.get("metadata", {})
if not metadata.get("is_visually_hidden_from_conversation", False):
messages.append(message)
# Sort by create_time if available
messages.sort(key=lambda x: x.get("create_time") or 0)
return messages
def _extract_content(self, content_data: Dict) -> Optional[str]:
"""Extract text content from ChatGPT content structure"""
if not content_data:
return None
content_type = content_data.get("content_type")
if content_type == "text":
parts = content_data.get("parts", [])
if parts and parts[0]:
return parts[0].strip()
elif content_type == "user_editable_context":
# User context/instructions
user_instructions = content_data.get("user_instructions", "")
if user_instructions:
return f"[User Context] {user_instructions}"
return None
def _add_user_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
"""Add user message to ai.gpt memory system"""
timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()
# Create conversation record
conversation = Conversation(
id=str(uuid.uuid4()),
user_id=user_id,
user_message=content,
ai_response="", # Will be filled by next assistant message
timestamp=timestamp,
context={"source": "chatgpt_import", "conversation_title": conversation_title}
)
# Add to memory with CORE level (imported data is important)
memory = Memory(
id=str(uuid.uuid4()),
timestamp=timestamp,
content=content,
level=MemoryLevel.CORE,
importance_score=0.8 # High importance for imported data
)
self.memory_manager.add_memory(memory)
# Update relationship (positive interaction)
self.relationship_tracker.update_interaction(user_id, 1.0)
def _add_assistant_message(self, user_id: str, content: str, create_time: Optional[float], conversation_title: str):
"""Add assistant message to ai.gpt memory system"""
timestamp = datetime.fromtimestamp(create_time) if create_time else datetime.now()
# Add assistant response as memory (AI's own responses can inform future behavior)
memory = Memory(
id=str(uuid.uuid4()),
timestamp=timestamp,
content=f"[AI Response] {content}",
level=MemoryLevel.SUMMARY,
importance_score=0.6 # Medium importance for AI responses
)
self.memory_manager.add_memory(memory)

File diff suppressed because it is too large

src/aigpt/commands/docs.py (deleted)

@@ -1,729 +0,0 @@
"""Documentation management commands for ai.gpt."""
from pathlib import Path
from typing import Dict, List, Optional
import typer
from rich.console import Console
from rich.panel import Panel
from rich.progress import track
from rich.table import Table
from ..docs.config import get_ai_root, load_docs_config
from ..docs.templates import DocumentationTemplateManager
from ..docs.git_utils import ensure_submodules_available
from ..docs.wiki_generator import WikiGenerator
from ..docs.utils import (
ProgressManager,
count_lines,
find_project_directories,
format_file_size,
safe_write_file,
validate_project_name,
)
console = Console()
docs_app = typer.Typer(help="Documentation management for AI ecosystem")
@docs_app.command("generate")
def generate_docs(
project: str = typer.Option(..., "--project", "-p", help="Project name (os, gpt, card, etc.)"),
output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"),
include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"),
dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"),
ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be generated without writing files"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
"""Generate project documentation with Claude AI integration.
Creates comprehensive documentation by combining core philosophy,
architecture, and project-specific content. Supports ai.gpt
integration for enhanced documentation generation.
Examples:
# Generate basic documentation
aigpt docs generate --project=os
# Generate with custom directory
aigpt docs generate --project=gpt --dir ~/ai/ai
# Generate without auto-pulling missing submodules
aigpt docs generate --project=card --no-auto-pull
# Generate with ai.gpt integration
aigpt docs generate --project=card --ai-gpt-integration
# Preview without writing
aigpt docs generate --project=verse --dry-run
"""
try:
# Load configuration
with ProgressManager("Loading configuration...") as progress:
config = load_docs_config(dir)
ai_root = get_ai_root(dir)
# Ensure submodules are available
if auto_pull:
with ProgressManager("Checking submodules...") as progress:
success, errors = ensure_submodules_available(ai_root, config, auto_clone=True)
if not success:
console.print(f"[red]Submodule errors: {errors}[/red]")
if not typer.confirm("Continue anyway?"):
raise typer.Abort()
# Validate project
available_projects = config.list_projects()
if not validate_project_name(project, available_projects):
console.print(f"[red]Error: Project '{project}' not found[/red]")
console.print(f"Available projects: {', '.join(available_projects)}")
raise typer.Abort()
# Parse components
components = [c.strip() for c in include.split(",")]
# Initialize template manager
template_manager = DocumentationTemplateManager(config)
# Validate components
valid_components = template_manager.validate_components(components)
if valid_components != components:
console.print("[yellow]Some components were invalid and filtered out[/yellow]")
# Show generation info
project_info = config.get_project_info(project)
info_table = Table(title=f"Documentation Generation: {project}")
info_table.add_column("Property", style="cyan")
info_table.add_column("Value", style="green")
info_table.add_row("Project Type", project_info.type if project_info else "Unknown")
info_table.add_row("Status", project_info.status if project_info else "Unknown")
info_table.add_row("Output Path", str(output))
info_table.add_row("Components", ", ".join(valid_components))
info_table.add_row("AI.GPT Integration", "" if ai_gpt_integration else "")
info_table.add_row("Mode", "Dry Run" if dry_run else "Generate")
console.print(info_table)
console.print()
# AI.GPT integration
if ai_gpt_integration:
console.print("[blue]🤖 AI.GPT Integration enabled[/blue]")
try:
enhanced_content = _integrate_with_ai_gpt(project, valid_components, verbose)
if enhanced_content:
console.print("[green]✓ AI.GPT enhancement applied[/green]")
else:
console.print("[yellow]⚠ AI.GPT enhancement failed, using standard generation[/yellow]")
except Exception as e:
console.print(f"[yellow]⚠ AI.GPT integration error: {e}[/yellow]")
console.print("[dim]Falling back to standard generation[/dim]")
# Generate documentation
with ProgressManager("Generating documentation...") as progress:
content = template_manager.generate_documentation(
project_name=project,
components=valid_components,
output_path=None if dry_run else output,
)
# Show results
if dry_run:
console.print(Panel(
f"[dim]Preview of generated content ({len(content.splitlines())} lines)[/dim]\n\n" +
content[:500] + "\n\n[dim]... (truncated)[/dim]",
title="Dry Run Preview",
expand=False,
))
console.print(f"[yellow]🔍 Dry run completed. Would write to: {output}[/yellow]")
else:
# Write content if not dry run
if safe_write_file(output, content):
file_size = output.stat().st_size
line_count = count_lines(output)
console.print(f"[green]✅ Generated: {output}[/green]")
console.print(f"[dim]📏 Size: {format_file_size(file_size)} ({line_count} lines)[/dim]")
# Show component breakdown
if verbose:
console.print("\n[blue]📋 Component breakdown:[/blue]")
for component in valid_components:
component_display = component.replace("_", " ").title()
console.print(f"{component_display}")
else:
console.print("[red]❌ Failed to write documentation[/red]")
raise typer.Abort()
except Exception as e:
if verbose:
console.print_exception()
else:
console.print(f"[red]Error: {e}[/red]")
raise typer.Abort()
@docs_app.command("sync")
def sync_docs(
project: Optional[str] = typer.Option(None, "--project", "-p", help="Sync specific project"),
sync_all: bool = typer.Option(False, "--all", "-a", help="Sync all available projects"),
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"),
include: str = typer.Option("core,specific", "--include", "-i", help="Components to include in sync"),
dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Automatically pull missing submodules"),
ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
"""Sync documentation across multiple projects.
Synchronizes Claude documentation from the central claude/ directory
to individual project directories. Supports both single-project and
bulk synchronization operations.
Examples:
# Sync specific project
aigpt docs sync --project=os
# Sync all projects with custom directory
aigpt docs sync --all --dir ~/ai/ai
# Preview sync operations
aigpt docs sync --all --dry-run
# Sync without auto-pulling submodules
aigpt docs sync --project=gpt --no-auto-pull
"""
# Validate arguments
if not project and not sync_all:
console.print("[red]Error: Either --project or --all is required[/red]")
raise typer.Abort()
if project and sync_all:
console.print("[red]Error: Cannot use both --project and --all[/red]")
raise typer.Abort()
try:
# Load configuration
with ProgressManager("Loading configuration...") as progress:
config = load_docs_config(dir)
ai_root = get_ai_root(dir)
# Ensure submodules are available
if auto_pull:
with ProgressManager("Checking submodules...") as progress:
success, errors = ensure_submodules_available(ai_root, config, auto_clone=True)
if not success:
console.print(f"[red]Submodule errors: {errors}[/red]")
if not typer.confirm("Continue anyway?"):
raise typer.Abort()
available_projects = config.list_projects()
# Validate specific project if provided
if project and not validate_project_name(project, available_projects):
console.print(f"[red]Error: Project '{project}' not found[/red]")
console.print(f"Available projects: {', '.join(available_projects)}")
raise typer.Abort()
# Determine projects to sync
if sync_all:
target_projects = available_projects
else:
target_projects = [project]
# Find project directories
project_dirs = find_project_directories(ai_root, target_projects)
# Show sync information
sync_table = Table(title="Documentation Sync Plan")
sync_table.add_column("Project", style="cyan")
sync_table.add_column("Directory", style="blue")
sync_table.add_column("Status", style="green")
sync_table.add_column("Components", style="yellow")
for proj in target_projects:
if proj in project_dirs:
target_file = project_dirs[proj] / "claude.md"
status = "✓ Found" if target_file.parent.exists() else "⚠ Missing"
sync_table.add_row(proj, str(project_dirs[proj]), status, include)
else:
sync_table.add_row(proj, "Not found", "❌ Missing", "N/A")
console.print(sync_table)
console.print()
if dry_run:
console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]")
# AI.GPT integration setup
if ai_gpt_integration:
console.print("[blue]🤖 AI.GPT Integration enabled[/blue]")
console.print("[dim]Enhanced documentation generation will be applied[/dim]")
console.print()
# Perform sync operations
sync_results = []
for proj in track(target_projects, description="Syncing projects..."):
result = _sync_project(
proj,
project_dirs.get(proj),
include,
dry_run,
ai_gpt_integration,
verbose
)
sync_results.append((proj, result))
# Show results summary
_show_sync_summary(sync_results, dry_run)
except Exception as e:
if verbose:
console.print_exception()
else:
console.print(f"[red]Error: {e}[/red]")
raise typer.Abort()
def _sync_project(
project_name: str,
project_dir: Optional[Path],
include: str,
dry_run: bool,
ai_gpt_integration: bool,
verbose: bool,
) -> Dict:
"""Sync a single project."""
result = {
"project": project_name,
"success": False,
"message": "",
"output_file": None,
"lines": 0,
}
if not project_dir:
result["message"] = "Directory not found"
return result
if not project_dir.exists():
result["message"] = f"Directory does not exist: {project_dir}"
return result
target_file = project_dir / "claude.md"
if dry_run:
result["success"] = True
result["message"] = f"Would sync to {target_file}"
result["output_file"] = target_file
return result
try:
# Use the generate functionality
config = load_docs_config()
template_manager = DocumentationTemplateManager(config)
# Generate documentation
content = template_manager.generate_documentation(
project_name=project_name,
components=[c.strip() for c in include.split(",")],
output_path=target_file,
)
result["success"] = True
result["message"] = "Successfully synced"
result["output_file"] = target_file
result["lines"] = len(content.splitlines())
if verbose:
console.print(f"[dim]✓ Synced {project_name}{target_file}[/dim]")
except Exception as e:
result["message"] = f"Sync failed: {str(e)}"
if verbose:
console.print(f"[red]✗ Failed {project_name}: {e}[/red]")
return result
def _show_sync_summary(sync_results: List[tuple], dry_run: bool) -> None:
"""Show sync operation summary."""
success_count = sum(1 for _, result in sync_results if result["success"])
total_count = len(sync_results)
error_count = total_count - success_count
# Summary table
summary_table = Table(title="Sync Summary")
summary_table.add_column("Metric", style="cyan")
summary_table.add_column("Value", style="green")
summary_table.add_row("Total Projects", str(total_count))
summary_table.add_row("Successful", str(success_count))
summary_table.add_row("Failed", str(error_count))
if not dry_run:
total_lines = sum(result["lines"] for _, result in sync_results if result["success"])
summary_table.add_row("Total Lines Generated", str(total_lines))
console.print()
console.print(summary_table)
# Show errors if any
if error_count > 0:
console.print()
console.print("[red]❌ Failed Projects:[/red]")
for project_name, result in sync_results:
if not result["success"]:
console.print(f"{project_name}: {result['message']}")
# Final status
console.print()
if dry_run:
console.print("[yellow]🔍 This was a dry run. To apply changes, run without --dry-run[/yellow]")
elif error_count == 0:
console.print("[green]🎉 All projects synced successfully![/green]")
else:
console.print(f"[yellow]⚠ Completed with {error_count} error(s)[/yellow]")
def _integrate_with_ai_gpt(project: str, components: List[str], verbose: bool) -> Optional[str]:
"""Integrate with ai.gpt for enhanced documentation generation."""
try:
from ..ai_provider import create_ai_provider
from ..persona import Persona
from ..config import Config
config = Config()
ai_root = config.data_dir.parent if config.data_dir else Path.cwd()
# Create AI provider
provider = config.get("default_provider", "ollama")
model = config.get(f"providers.{provider}.default_model", "qwen2.5")
ai_provider = create_ai_provider(provider=provider, model=model)
persona = Persona(config.data_dir)
# Create enhancement prompt
enhancement_prompt = f"""As an AI documentation expert, enhance the documentation for project '{project}'.
Project type: {project}
Components to include: {', '.join(components)}
Please provide:
1. Improved project description
2. Key features that should be highlighted
3. Usage examples
4. Integration points with other AI ecosystem projects
5. Development workflow recommendations
Focus on making the documentation more comprehensive and user-friendly."""
if verbose:
console.print("[dim]Generating AI-enhanced content...[/dim]")
# Get AI response
response, _ = persona.process_interaction(
"docs_system",
enhancement_prompt,
ai_provider
)
if verbose:
console.print("[green]✓ AI enhancement generated[/green]")
return response
except ImportError as e:
if verbose:
console.print(f"[yellow]AI integration unavailable: {e}[/yellow]")
return None
except Exception as e:
if verbose:
console.print(f"[red]AI integration error: {e}[/red]")
return None
# Add aliases for convenience
@docs_app.command("gen")
def generate_docs_alias(
project: str = typer.Option(..., "--project", "-p", help="Project name"),
output: Path = typer.Option(Path("./claude.md"), "--output", "-o", help="Output file path"),
include: str = typer.Option("core,specific", "--include", "-i", help="Components to include"),
ai_gpt_integration: bool = typer.Option(False, "--ai-gpt-integration", help="Enable ai.gpt integration"),
dry_run: bool = typer.Option(False, "--dry-run", help="Preview mode"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
) -> None:
"""Alias for generate command."""
generate_docs(project, output, include, ai_gpt_integration, dry_run, verbose)
@docs_app.command("wiki")
def wiki_management(
action: str = typer.Option("update-auto", "--action", "-a", help="Action to perform (update-auto, build-home, status)"),
dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
auto_pull: bool = typer.Option(True, "--auto-pull/--no-auto-pull", help="Pull latest wiki changes before update"),
ai_enhance: bool = typer.Option(False, "--ai-enhance", help="Use AI to enhance wiki content"),
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done without making changes"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output"),
) -> None:
"""Manage AI wiki generation and updates.
Automatically generates wiki pages from project claude.md files
and maintains the ai.wiki repository structure.
Actions:
- update-auto: Generate auto/ directory with project summaries
- build-home: Rebuild Home.md from all projects
- status: Show wiki repository status
Examples:
# Update auto-generated content (with auto-pull)
aigpt docs wiki --action=update-auto
# Update without pulling latest changes
aigpt docs wiki --action=update-auto --no-auto-pull
# Update with custom directory
aigpt docs wiki --action=update-auto --dir ~/ai/ai
# Preview what would be generated
aigpt docs wiki --action=update-auto --dry-run
# Check wiki status
aigpt docs wiki --action=status
"""
try:
# Load configuration
with ProgressManager("Loading configuration...") as progress:
config = load_docs_config(dir)
ai_root = get_ai_root(dir)
# Initialize wiki generator
wiki_generator = WikiGenerator(config, ai_root)
if not wiki_generator.wiki_root:
console.print("[red]❌ ai.wiki directory not found[/red]")
console.print(f"Expected location: {ai_root / 'ai.wiki'}")
console.print("Please ensure ai.wiki submodule is cloned")
raise typer.Abort()
# Show wiki information
if verbose:
console.print(f"[blue]📁 Wiki root: {wiki_generator.wiki_root}[/blue]")
console.print(f"[blue]📁 AI root: {ai_root}[/blue]")
if action == "status":
_show_wiki_status(wiki_generator, ai_root)
elif action == "update-auto":
if dry_run:
console.print("[yellow]🔍 DRY RUN MODE - No files will be modified[/yellow]")
if auto_pull:
console.print("[blue]📥 Would pull latest wiki changes[/blue]")
# Show what would be generated
project_dirs = find_project_directories(ai_root, config.list_projects())
console.print(f"[blue]📋 Would generate {len(project_dirs)} project pages:[/blue]")
for project_name in project_dirs.keys():
console.print(f" • auto/{project_name}.md")
console.print(" • Home.md")
else:
with ProgressManager("Updating wiki auto directory...") as progress:
success, updated_files = wiki_generator.update_wiki_auto_directory(
auto_pull=auto_pull,
ai_enhance=ai_enhance
)
if success:
console.print(f"[green]✅ Successfully updated {len(updated_files)} files[/green]")
if verbose:
for file in updated_files:
console.print(f"{file}")
else:
console.print("[red]❌ Failed to update wiki[/red]")
raise typer.Abort()
elif action == "build-home":
console.print("[blue]🏠 Building Home.md...[/blue]")
# This would be implemented to rebuild just Home.md
console.print("[yellow]⚠ build-home action not yet implemented[/yellow]")
else:
console.print(f"[red]Unknown action: {action}[/red]")
console.print("Available actions: update-auto, build-home, status")
raise typer.Abort()
except Exception as e:
if verbose:
console.print_exception()
else:
console.print(f"[red]Error: {e}[/red]")
raise typer.Abort()
def _show_wiki_status(wiki_generator: WikiGenerator, ai_root: Path) -> None:
"""Show wiki repository status."""
console.print("[blue]📊 AI Wiki Status[/blue]")
# Check wiki directory structure
wiki_root = wiki_generator.wiki_root
status_table = Table(title="Wiki Directory Status")
status_table.add_column("Directory", style="cyan")
status_table.add_column("Status", style="green")
status_table.add_column("Files", style="yellow")
directories = ["auto", "claude", "manual"]
for dir_name in directories:
dir_path = wiki_root / dir_name
if dir_path.exists():
file_count = len(list(dir_path.glob("*.md")))
status = "✓ Exists"
files = f"{file_count} files"
else:
status = "❌ Missing"
files = "N/A"
status_table.add_row(dir_name, status, files)
# Check Home.md
home_path = wiki_root / "Home.md"
home_status = "✓ Exists" if home_path.exists() else "❌ Missing"
status_table.add_row("Home.md", home_status, "1 file" if home_path.exists() else "N/A")
console.print(status_table)
# Show project coverage
config = wiki_generator.config
project_dirs = find_project_directories(ai_root, config.list_projects())
auto_dir = wiki_root / "auto"
if auto_dir.exists():
existing_wiki_files = set(f.stem for f in auto_dir.glob("*.md"))
available_projects = set(project_dirs.keys())
missing = available_projects - existing_wiki_files
orphaned = existing_wiki_files - available_projects
console.print(f"\n[blue]📋 Project Coverage:[/blue]")
console.print(f" • Total projects: {len(available_projects)}")
console.print(f" • Wiki pages: {len(existing_wiki_files)}")
if missing:
console.print(f" • Missing wiki pages: {', '.join(missing)}")
if orphaned:
console.print(f" • Orphaned wiki pages: {', '.join(orphaned)}")
if not missing and not orphaned:
console.print(f" • ✅ All projects have wiki pages")
@docs_app.command("config")
def docs_config(
action: str = typer.Option("show", "--action", "-a", help="Action (show, set-dir, clear-dir)"),
value: Optional[str] = typer.Option(None, "--value", "-v", help="Value to set"),
verbose: bool = typer.Option(False, "--verbose", help="Enable verbose output"),
) -> None:
"""Manage documentation configuration.
Configure default settings for aigpt docs commands to avoid
repeating options like --dir every time.
Actions:
- show: Display current configuration
- set-dir: Set default AI root directory
- clear-dir: Clear default AI root directory
Examples:
# Show current config
aigpt docs config --action=show
# Set default directory
aigpt docs config --action=set-dir --value=~/ai/ai
# Clear default directory
aigpt docs config --action=clear-dir
"""
try:
from ..config import Config
config = Config()
if action == "show":
console.print("[blue]📁 AI Documentation Configuration[/blue]")
# Show current ai_root resolution
current_ai_root = get_ai_root()
console.print(f"[green]Current AI root: {current_ai_root}[/green]")
# Show resolution method
import os
env_dir = os.getenv("AI_DOCS_DIR")
config_dir = config.get("docs.ai_root")
resolution_table = Table(title="Directory Resolution")
resolution_table.add_column("Method", style="cyan")
resolution_table.add_column("Value", style="yellow")
resolution_table.add_column("Status", style="green")
resolution_table.add_row("Environment (AI_DOCS_DIR)", env_dir or "Not set", "✓ Active" if env_dir else "Not used")
resolution_table.add_row("Config file (docs.ai_root)", config_dir or "Not set", "✓ Active" if config_dir and not env_dir else "Not used")
resolution_table.add_row("Default (relative)", str(Path(__file__).parent.parent.parent.parent.parent), "✓ Active" if not env_dir and not config_dir else "Not used")
console.print(resolution_table)
if verbose:
console.print(f"\n[dim]Config file: {config.config_file}[/dim]")
elif action == "set-dir":
if not value:
console.print("[red]Error: --value is required for set-dir action[/red]")
raise typer.Abort()
# Expand and validate path
ai_root_path = Path(value).expanduser().absolute()
if not ai_root_path.exists():
console.print(f"[yellow]Warning: Directory does not exist: {ai_root_path}[/yellow]")
if not typer.confirm("Set anyway?"):
raise typer.Abort()
# Check if ai.json exists
ai_json_path = ai_root_path / "ai.json"
if not ai_json_path.exists():
console.print(f"[yellow]Warning: ai.json not found at: {ai_json_path}[/yellow]")
if not typer.confirm("Set anyway?"):
raise typer.Abort()
# Save to config
config.set("docs.ai_root", str(ai_root_path))
console.print(f"[green]✅ Set default AI root directory: {ai_root_path}[/green]")
console.print("[dim]This will be used when --dir is not specified and AI_DOCS_DIR is not set[/dim]")
elif action == "clear-dir":
config.delete("docs.ai_root")
console.print("[green]✅ Cleared default AI root directory[/green]")
console.print("[dim]Will use default relative path when --dir and AI_DOCS_DIR are not set[/dim]")
else:
console.print(f"[red]Unknown action: {action}[/red]")
console.print("Available actions: show, set-dir, clear-dir")
raise typer.Abort()
except Exception as e:
if verbose:
console.print_exception()
else:
console.print(f"[red]Error: {e}[/red]")
raise typer.Abort()
# Export the docs app
__all__ = ["docs_app"]

src/aigpt/commands/submodules.py (deleted)

@@ -1,305 +0,0 @@
"""Submodule management commands for ai.gpt."""
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import subprocess
import json
import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from ..docs.config import get_ai_root, load_docs_config
from ..docs.git_utils import (
check_git_repository,
get_git_branch,
get_git_remote_url
)
from ..docs.utils import run_command
console = Console()
submodules_app = typer.Typer(help="Submodule management for AI ecosystem")
def get_submodules_from_gitmodules(repo_path: Path) -> Dict[str, str]:
"""Parse .gitmodules file to get submodule information."""
gitmodules_path = repo_path / ".gitmodules"
if not gitmodules_path.exists():
return {}
submodules = {}
current_name = None
with open(gitmodules_path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('[submodule "') and line.endswith('"]'):
current_name = line[12:-2] # Extract module name
elif line.startswith('path = ') and current_name:
path = line[7:] # Extract path
submodules[current_name] = path
current_name = None
return submodules
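# .gitmodules format handled above (illustrative entry; lines other than the
# section header and "path" are ignored by this parser):
# [submodule "gpt"]
#     path = gpt
#     url = git@example.com:user/gpt.git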
def get_branch_for_module(config, module_name: str) -> str:
"""Get target branch for a module from ai.json."""
project_info = config.get_project_info(module_name)
if project_info and project_info.branch:
return project_info.branch
return "main" # Default branch
@submodules_app.command("list")
def list_submodules(
dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed information")
):
"""List all submodules and their status."""
try:
config = load_docs_config(dir)
ai_root = get_ai_root(dir)
if not check_git_repository(ai_root):
console.print("[red]Error: Not a git repository[/red]")
raise typer.Abort()
submodules = get_submodules_from_gitmodules(ai_root)
if not submodules:
console.print("[yellow]No submodules found[/yellow]")
return
table = Table(title="Submodules Status")
table.add_column("Module", style="cyan")
table.add_column("Path", style="blue")
table.add_column("Branch", style="green")
table.add_column("Status", style="yellow")
for module_name, module_path in submodules.items():
full_path = ai_root / module_path
if not full_path.exists():
status = "❌ Missing"
branch = "N/A"
else:
branch = get_git_branch(full_path) or "detached"
# Check if submodule is up to date
returncode, stdout, stderr = run_command(
["git", "submodule", "status", module_path],
cwd=ai_root
)
if returncode == 0 and stdout:
status_char = stdout[0] if stdout else ' '
if status_char == ' ':
status = "✅ Clean"
elif status_char == '+':
status = "📝 Modified"
elif status_char == '-':
status = "❌ Not initialized"
elif status_char == 'U':
status = "⚠️ Conflicts"
else:
status = "❓ Unknown"
else:
status = "❓ Unknown"
target_branch = get_branch_for_module(config, module_name)
branch_display = f"{branch}"
if branch != target_branch:
branch_display += f" (target: {target_branch})"
table.add_row(module_name, module_path, branch_display, status)
console.print(table)
if verbose:
console.print(f"\n[dim]Total submodules: {len(submodules)}[/dim]")
console.print(f"[dim]Repository root: {ai_root}[/dim]")
except Exception as e:
console.print(f"[red]Error: {e}[/red]")
raise typer.Abort()
@submodules_app.command("update")
def update_submodules(
module: Optional[str] = typer.Option(None, "--module", "-m", help="Update specific submodule"),
all: bool = typer.Option(False, "--all", "-a", help="Update all submodules"),
dir: Optional[Path] = typer.Option(None, "--dir", "-d", help="AI ecosystem root directory"),
dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be done"),
auto_commit: bool = typer.Option(False, "--auto-commit", help="Auto-commit changes"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output")
):
"""Update submodules to latest commits."""
if not module and not all:
console.print("[red]Error: Either --module or --all is required[/red]")
raise typer.Abort()
if module and all:
console.print("[red]Error: Cannot use both --module and --all[/red]")
raise typer.Abort()
try:
config = load_docs_config(dir)
ai_root = get_ai_root(dir)
if not check_git_repository(ai_root):
console.print("[red]Error: Not a git repository[/red]")
raise typer.Abort()
submodules = get_submodules_from_gitmodules(ai_root)
if not submodules:
console.print("[yellow]No submodules found[/yellow]")
return
# Determine which modules to update
if all:
modules_to_update = list(submodules.keys())
else:
if module not in submodules:
console.print(f"[red]Error: Submodule '{module}' not found[/red]")
console.print(f"Available modules: {', '.join(submodules.keys())}")
raise typer.Abort()
modules_to_update = [module]
if dry_run:
console.print("[yellow]🔍 DRY RUN MODE - No changes will be made[/yellow]")
console.print(f"[cyan]Updating {len(modules_to_update)} submodule(s)...[/cyan]")
updated_modules = []
for module_name in modules_to_update:
module_path = submodules[module_name]
full_path = ai_root / module_path
target_branch = get_branch_for_module(config, module_name)
console.print(f"\n[blue]📦 Processing: {module_name}[/blue]")
if not full_path.exists():
console.print(f"[red]❌ Module directory not found: {module_path}[/red]")
continue
# Get current commit
current_commit = None
returncode, stdout, stderr = run_command(
["git", "rev-parse", "HEAD"],
cwd=full_path
)
if returncode == 0:
current_commit = stdout.strip()[:8]
if dry_run:
console.print(f"[yellow]🔍 Would update {module_name} to branch {target_branch}[/yellow]")
if current_commit:
console.print(f"[dim]Current: {current_commit}[/dim]")
continue
# Fetch latest changes
console.print(f"[dim]Fetching latest changes...[/dim]")
returncode, stdout, stderr = run_command(
["git", "fetch", "origin"],
cwd=full_path
)
if returncode != 0:
console.print(f"[red]❌ Failed to fetch: {stderr}[/red]")
continue
# Check if update is needed
returncode, stdout, stderr = run_command(
["git", "rev-parse", f"origin/{target_branch}"],
cwd=full_path
)
if returncode != 0:
console.print(f"[red]❌ Branch {target_branch} not found on remote[/red]")
continue
latest_commit = stdout.strip()[:8]
if current_commit == latest_commit:
console.print(f"[green]✅ Already up to date[/green]")
continue
# Switch to target branch and pull
console.print(f"[dim]Switching to branch {target_branch}...[/dim]")
returncode, stdout, stderr = run_command(
["git", "checkout", target_branch],
cwd=full_path
)
if returncode != 0:
console.print(f"[red]❌ Failed to checkout {target_branch}: {stderr}[/red]")
continue
returncode, stdout, stderr = run_command(
["git", "pull", "origin", target_branch],
cwd=full_path
)
if returncode != 0:
console.print(f"[red]❌ Failed to pull: {stderr}[/red]")
continue
# Get new commit
returncode, stdout, stderr = run_command(
["git", "rev-parse", "HEAD"],
cwd=full_path
)
new_commit = stdout.strip()[:8] if returncode == 0 else "unknown"
# Stage the submodule update in the parent repository
returncode, stdout, stderr = run_command(
["git", "add", module_path],
cwd=ai_root
)
if returncode != 0:
console.print(f"[yellow]⚠️ Failed to stage {module_path}: {stderr}[/yellow]")
console.print(f"[green]✅ Updated {module_name} ({current_commit}{new_commit})[/green]")
updated_modules.append((module_name, current_commit, new_commit))
# Summary
if updated_modules:
console.print(f"\n[green]🎉 Successfully updated {len(updated_modules)} module(s)[/green]")
if verbose:
for module_name, old_commit, new_commit in updated_modules:
console.print(f"{module_name}: {old_commit}{new_commit}")
if auto_commit and not dry_run:
console.print("[blue]💾 Auto-committing changes...[/blue]")
commit_message = f"Update submodules\n\n📦 Updated modules: {len(updated_modules)}\n"
for module_name, old_commit, new_commit in updated_modules:
commit_message += f"- {module_name}: {old_commit}{new_commit}\n"
commit_message += "\n🤖 Generated with ai.gpt submodules update"
returncode, stdout, stderr = run_command(
["git", "commit", "-m", commit_message],
cwd=ai_root
)
if returncode == 0:
console.print("[green]✅ Changes committed successfully[/green]")
else:
console.print(f"[red]❌ Failed to commit: {stderr}[/red]")
elif not dry_run:
console.print("[yellow]💾 Changes staged but not committed[/yellow]")
console.print("Run with --auto-commit to commit automatically")
elif not dry_run:
console.print("[yellow]No modules needed updating[/yellow]")
except Exception as e:
console.print(f"[red]Error: {e}[/red]")
if verbose:
console.print_exception()
raise typer.Abort()
# Export the submodules app
__all__ = ["submodules_app"]
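# Usage sketch (assuming this Typer app is mounted as `aigpt submodules`,
# mirroring the `aigpt tokens ...` hint used elsewhere in this codebase):
#
#   aigpt submodules list --verbose
#   aigpt submodules update --all --dry-run
#   aigpt submodules update --module gpt --auto-commit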

View File

@@ -1,440 +0,0 @@
"""Claude Code token usage and cost analysis commands."""
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from datetime import datetime, timedelta
import json
import sqlite3
import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.progress import track
console = Console()
tokens_app = typer.Typer(help="Claude Code token usage and cost analysis")
# Claude Code pricing (estimated rates in USD)
CLAUDE_PRICING = {
"input_tokens_per_1k": 0.003, # $3 per 1M input tokens
"output_tokens_per_1k": 0.015, # $15 per 1M output tokens
"usd_to_jpy": 150 # Exchange rate
}
def find_claude_data_dir() -> Optional[Path]:
"""Find Claude Code data directory."""
possible_paths = [
Path.home() / ".claude",
Path.home() / ".config" / "claude",
Path.cwd() / ".claude"
]
for path in possible_paths:
if path.exists() and (path / "projects").exists():
return path
return None
def parse_jsonl_files(claude_dir: Path) -> List[Dict]:
"""Parse Claude Code JSONL files safely."""
records = []
projects_dir = claude_dir / "projects"
if not projects_dir.exists():
return records
# Find all .jsonl files recursively
jsonl_files = list(projects_dir.rglob("*.jsonl"))
for jsonl_file in track(jsonl_files, description="Reading Claude data..."):
try:
with open(jsonl_file, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
try:
record = json.loads(line)
# Only include records with usage information
if (record.get('type') == 'assistant' and
'message' in record and
'usage' in record.get('message', {})):
records.append(record)
except json.JSONDecodeError:
# Skip malformed JSON lines
continue
except (IOError, PermissionError):
# Skip files we can't read
continue
return records
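# Minimal sketch of a record that parse_jsonl_files() keeps (field values
# are hypothetical); anything without type == "assistant" and a
# message.usage block is skipped:
#
#   {
#     "type": "assistant",
#     "timestamp": "2025-06-08T00:00:00Z",
#     "costUSD": 0.0123,
#     "message": {"usage": {"input_tokens": 1200, "output_tokens": 350}}
#   }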
def calculate_costs(records: List[Dict]) -> Dict[str, float]:
"""Calculate token costs from usage records."""
total_input_tokens = 0
total_output_tokens = 0
total_cost_usd = 0
for record in records:
try:
usage = record.get('message', {}).get('usage', {})
input_tokens = int(usage.get('input_tokens', 0))
output_tokens = int(usage.get('output_tokens', 0))
# Calculate cost if not provided
cost_usd = record.get('costUSD')
if cost_usd is None:
input_cost = (input_tokens / 1000) * CLAUDE_PRICING["input_tokens_per_1k"]
output_cost = (output_tokens / 1000) * CLAUDE_PRICING["output_tokens_per_1k"]
cost_usd = input_cost + output_cost
else:
cost_usd = float(cost_usd)
total_input_tokens += input_tokens
total_output_tokens += output_tokens
total_cost_usd += cost_usd
except (ValueError, TypeError, KeyError):
# Skip records with invalid data
continue
return {
'input_tokens': total_input_tokens,
'output_tokens': total_output_tokens,
'total_tokens': total_input_tokens + total_output_tokens,
'cost_usd': total_cost_usd,
'cost_jpy': total_cost_usd * CLAUDE_PRICING["usd_to_jpy"]
}
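# Worked example for calculate_costs() with the rates above and hypothetical
# counts: 10,000 input tokens cost (10000 / 1000) * 0.003 = $0.03 and 2,000
# output tokens cost (2000 / 1000) * 0.015 = $0.03, so a record without a
# costUSD field contributes cost_usd = 0.06 and cost_jpy = 0.06 * 150 = ¥9.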
def group_by_date(records: List[Dict]) -> Dict[str, Dict]:
"""Group records by date and calculate daily costs."""
daily_stats = {}
for record in records:
try:
timestamp = record.get('timestamp')
if not timestamp:
continue
# Parse timestamp and convert to JST
dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
# Convert to JST (UTC+9)
jst_dt = dt + timedelta(hours=9)
date_key = jst_dt.strftime('%Y-%m-%d')
if date_key not in daily_stats:
daily_stats[date_key] = []
daily_stats[date_key].append(record)
except (ValueError, TypeError):
continue
# Calculate costs for each day
daily_costs = {}
for date_key, day_records in daily_stats.items():
daily_costs[date_key] = calculate_costs(day_records)
return daily_costs
@tokens_app.command("summary")
def token_summary(
period: str = typer.Option("all", help="Period: today, week, month, all"),
claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
show_details: bool = typer.Option(False, "--details", help="Show detailed breakdown"),
format: str = typer.Option("table", help="Output format: table, json")
):
"""Show Claude Code token usage summary and estimated costs."""
# Find Claude data directory
if claude_dir is None:
claude_dir = find_claude_data_dir()
if claude_dir is None:
console.print("[red]❌ Claude Code data directory not found[/red]")
console.print("[dim]Looked in: ~/.claude, ~/.config/claude, ./.claude[/dim]")
raise typer.Abort()
if not claude_dir.exists():
console.print(f"[red]❌ Directory not found: {claude_dir}[/red]")
raise typer.Abort()
console.print(f"[cyan]📊 Analyzing Claude Code usage from: {claude_dir}[/cyan]")
# Parse data
records = parse_jsonl_files(claude_dir)
if not records:
console.print("[yellow]⚠️ No usage data found[/yellow]")
return
# Filter by period
now = datetime.now()
filtered_records = []
if period == "today":
today = now.strftime('%Y-%m-%d')
for record in records:
try:
timestamp = record.get('timestamp')
if timestamp:
dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
jst_dt = dt + timedelta(hours=9)
if jst_dt.strftime('%Y-%m-%d') == today:
filtered_records.append(record)
except (ValueError, TypeError):
continue
elif period == "week":
week_ago = now - timedelta(days=7)
for record in records:
try:
timestamp = record.get('timestamp')
if timestamp:
dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
jst_dt = dt + timedelta(hours=9)
if jst_dt.date() >= week_ago.date():
filtered_records.append(record)
except (ValueError, TypeError):
continue
elif period == "month":
month_ago = now - timedelta(days=30)
for record in records:
try:
timestamp = record.get('timestamp')
if timestamp:
dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
jst_dt = dt + timedelta(hours=9)
if jst_dt.date() >= month_ago.date():
filtered_records.append(record)
except (ValueError, TypeError):
continue
else: # all
filtered_records = records
# Calculate total costs
total_stats = calculate_costs(filtered_records)
if format == "json":
# JSON output
output = {
"period": period,
"total_records": len(filtered_records),
"input_tokens": total_stats['input_tokens'],
"output_tokens": total_stats['output_tokens'],
"total_tokens": total_stats['total_tokens'],
"estimated_cost_usd": round(total_stats['cost_usd'], 2),
"estimated_cost_jpy": round(total_stats['cost_jpy'], 0)
}
console.print(json.dumps(output, indent=2))
return
# Table output
console.print(Panel(
f"[bold cyan]Claude Code Token Usage Report[/bold cyan]\n\n"
f"Period: {period.title()}\n"
f"Data source: {claude_dir}",
title="📊 Usage Analysis",
border_style="cyan"
))
# Summary table
summary_table = Table(title="Token Summary")
summary_table.add_column("Metric", style="cyan")
summary_table.add_column("Value", style="green")
summary_table.add_row("Input Tokens", f"{total_stats['input_tokens']:,}")
summary_table.add_row("Output Tokens", f"{total_stats['output_tokens']:,}")
summary_table.add_row("Total Tokens", f"{total_stats['total_tokens']:,}")
summary_table.add_row("", "") # Separator
summary_table.add_row("Estimated Cost (USD)", f"${total_stats['cost_usd']:.2f}")
summary_table.add_row("Estimated Cost (JPY)", f"¥{total_stats['cost_jpy']:,.0f}")
summary_table.add_row("Records Analyzed", str(len(filtered_records)))
console.print(summary_table)
# Show daily breakdown if requested
if show_details:
daily_costs = group_by_date(filtered_records)
if daily_costs:
console.print("\n")
daily_table = Table(title="Daily Breakdown")
daily_table.add_column("Date", style="cyan")
daily_table.add_column("Input Tokens", style="blue")
daily_table.add_column("Output Tokens", style="green")
daily_table.add_column("Total Tokens", style="yellow")
daily_table.add_column("Cost (JPY)", style="red")
for date in sorted(daily_costs.keys(), reverse=True):
stats = daily_costs[date]
daily_table.add_row(
date,
f"{stats['input_tokens']:,}",
f"{stats['output_tokens']:,}",
f"{stats['total_tokens']:,}",
f"¥{stats['cost_jpy']:,.0f}"
)
console.print(daily_table)
# Warning about estimates
console.print("\n[dim]💡 Note: Costs are estimates based on Claude API pricing.[/dim]")
console.print("[dim] Actual Claude Code subscription costs may differ.[/dim]")
@tokens_app.command("daily")
def daily_breakdown(
days: int = typer.Option(7, help="Number of days to show"),
claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
):
"""Show daily token usage breakdown."""
# Find Claude data directory
if claude_dir is None:
claude_dir = find_claude_data_dir()
if claude_dir is None:
console.print("[red]❌ Claude Code data directory not found[/red]")
raise typer.Abort()
console.print(f"[cyan]📅 Daily token usage (last {days} days)[/cyan]")
# Parse data
records = parse_jsonl_files(claude_dir)
if not records:
console.print("[yellow]⚠️ No usage data found[/yellow]")
return
# Group by date
daily_costs = group_by_date(records)
# Get recent days
recent_dates = sorted(daily_costs.keys(), reverse=True)[:days]
if not recent_dates:
console.print("[yellow]No recent usage data found[/yellow]")
return
# Create table
table = Table(title=f"Daily Usage (Last {len(recent_dates)} days)")
table.add_column("Date", style="cyan")
table.add_column("Input", style="blue")
table.add_column("Output", style="green")
table.add_column("Total", style="yellow")
table.add_column("Cost (JPY)", style="red")
total_cost = 0
for date in recent_dates:
stats = daily_costs[date]
total_cost += stats['cost_jpy']
table.add_row(
date,
f"{stats['input_tokens']:,}",
f"{stats['output_tokens']:,}",
f"{stats['total_tokens']:,}",
f"¥{stats['cost_jpy']:,.0f}"
)
# Add total row
table.add_row(
"──────────",
"────────",
"────────",
"────────",
"──────────"
)
table.add_row(
"【Total】",
"",
"",
"",
f"¥{total_cost:,.0f}"
)
console.print(table)
console.print(f"\n[green]Total estimated cost for {len(recent_dates)} days: ¥{total_cost:,.0f}[/green]")
@tokens_app.command("status")
def token_status(
claude_dir: Optional[Path] = typer.Option(None, "--claude-dir", help="Claude data directory"),
):
"""Check Claude Code data availability and basic stats."""
# Find Claude data directory
if claude_dir is None:
claude_dir = find_claude_data_dir()
console.print("[cyan]🔍 Claude Code Data Status[/cyan]")
if claude_dir is None:
console.print("[red]❌ Claude Code data directory not found[/red]")
console.print("\n[yellow]Searched locations:[/yellow]")
console.print(" • ~/.claude")
console.print(" • ~/.config/claude")
console.print(" • ./.claude")
console.print("\n[dim]Make sure Claude Code is installed and has been used.[/dim]")
return
console.print(f"[green]✅ Found data directory: {claude_dir}[/green]")
projects_dir = claude_dir / "projects"
if not projects_dir.exists():
console.print("[yellow]⚠️ No projects directory found[/yellow]")
return
# Count files
jsonl_files = list(projects_dir.rglob("*.jsonl"))
console.print(f"[blue]📂 Found {len(jsonl_files)} JSONL files[/blue]")
if jsonl_files:
# Parse sample to check data quality
sample_records = []
for jsonl_file in jsonl_files[:3]: # Check first 3 files
try:
with open(jsonl_file, 'r', encoding='utf-8') as f:
for line in f:
if line.strip():
try:
record = json.loads(line.strip())
sample_records.append(record)
if len(sample_records) >= 10:
break
except json.JSONDecodeError:
continue
if len(sample_records) >= 10:
break
except IOError:
continue
usage_records = [r for r in sample_records
if r.get('type') == 'assistant' and
'usage' in r.get('message', {})]
console.print(f"[green]📊 Found {len(usage_records)} usage records in sample[/green]")
if usage_records:
console.print("[blue]✅ Data appears valid for cost analysis[/blue]")
console.print("\n[dim]Run 'aigpt tokens summary' for full analysis[/dim]")
else:
console.print("[yellow]⚠️ No usage data found in sample[/yellow]")
else:
console.print("[yellow]⚠️ No JSONL files found[/yellow]")
# Export the tokens app
__all__ = ["tokens_app"]
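# Usage sketch (assuming the app is mounted as `aigpt tokens`, as the status
# command's own hint above suggests):
#
#   aigpt tokens summary --period week --details
#   aigpt tokens summary --format json
#   aigpt tokens daily --days 14
#   aigpt tokens status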

View File

@@ -1,184 +0,0 @@
"""Configuration management for ai.gpt"""
import json
import os
from pathlib import Path
from typing import Optional, Dict, Any
import logging
class Config:
"""Manages configuration settings"""
def __init__(self, config_dir: Optional[Path] = None):
if config_dir is None:
config_dir = Path.home() / ".config" / "syui" / "ai" / "gpt"
self.config_dir = config_dir
self.config_file = config_dir / "config.json"
self.data_dir = config_dir / "data"
# Create directories if they don't exist
self.config_dir.mkdir(parents=True, exist_ok=True)
self.data_dir.mkdir(parents=True, exist_ok=True)
self.logger = logging.getLogger(__name__)
self._config: Dict[str, Any] = {}
self._load_config()
def _load_config(self):
"""Load configuration from file"""
if self.config_file.exists():
try:
with open(self.config_file, 'r', encoding='utf-8') as f:
self._config = json.load(f)
except Exception as e:
self.logger.error(f"Failed to load config: {e}")
self._config = {}
else:
# Initialize with default config
self._config = {
"providers": {
"openai": {
"api_key": None,
"default_model": "gpt-4o-mini",
"system_prompt": None
},
"ollama": {
"host": "http://localhost:11434",
"default_model": "qwen3:latest",
"system_prompt": None
}
},
"mcp": {
"enabled": True,
"auto_detect": True,
"servers": {
"ai_gpt": {
"name": "ai.gpt MCP Server",
"base_url": "http://localhost:8001",
"endpoints": {
"get_memories": "/get_memories",
"search_memories": "/search_memories",
"get_contextual_memories": "/get_contextual_memories",
"process_interaction": "/process_interaction",
"get_relationship": "/get_relationship",
"get_all_relationships": "/get_all_relationships",
"get_persona_state": "/get_persona_state",
"get_fortune": "/get_fortune",
"run_maintenance": "/run_maintenance",
"execute_command": "/execute_command",
"analyze_file": "/analyze_file",
"remote_shell": "/remote_shell",
"ai_bot_status": "/ai_bot_status"
},
"timeout": 10.0
},
"ai_card": {
"name": "ai.card MCP Server",
"base_url": "http://localhost:8000",
"endpoints": {
"health": "/health",
"get_user_cards": "/api/cards/user",
"gacha": "/api/gacha",
"sync_atproto": "/api/sync"
},
"timeout": 5.0
}
}
},
"atproto": {
"handle": None,
"password": None,
"host": "https://bsky.social"
},
"default_provider": "ollama"
}
self._save_config()
def _save_config(self):
"""Save configuration to file"""
try:
with open(self.config_file, 'w', encoding='utf-8') as f:
json.dump(self._config, f, indent=2)
except Exception as e:
self.logger.error(f"Failed to save config: {e}")
def get(self, key: str, default: Any = None) -> Any:
"""Get configuration value using dot notation"""
keys = key.split('.')
value = self._config
for k in keys:
if isinstance(value, dict) and k in value:
value = value[k]
else:
return default
return value
def set(self, key: str, value: Any):
"""Set configuration value using dot notation"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
config[k] = {}
config = config[k]
# Set the value
config[keys[-1]] = value
self._save_config()
def delete(self, key: str) -> bool:
"""Delete configuration value"""
keys = key.split('.')
config = self._config
# Navigate to the parent dictionary
for k in keys[:-1]:
if k not in config:
return False
config = config[k]
# Delete the key if it exists
if keys[-1] in config:
del config[keys[-1]]
self._save_config()
return True
return False
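# Minimal sketch of the dot-notation API (key paths are illustrative):
#
#   config = Config()
#   config.set("providers.ollama.default_model", "qwen3:latest")
#   config.get("providers.ollama.default_model")    # -> "qwen3:latest"
#   config.get("providers.openai.api_key")          # -> None unless configured
#   config.delete("providers.ollama.default_model") # -> True if the key existed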
def list_keys(self, prefix: str = "") -> list[str]:
"""List all configuration keys with optional prefix"""
def _get_keys(config: dict, current_prefix: str = "") -> list[str]:
keys = []
for k, v in config.items():
full_key = f"{current_prefix}.{k}" if current_prefix else k
if isinstance(v, dict):
keys.extend(_get_keys(v, full_key))
else:
keys.append(full_key)
return keys
all_keys = _get_keys(self._config)
if prefix:
return [k for k in all_keys if k.startswith(prefix)]
return all_keys
def get_api_key(self, provider: str) -> Optional[str]:
"""Get API key for a specific provider"""
key = self.get(f"providers.{provider}.api_key")
# Also check environment variables
if not key and provider == "openai":
key = os.getenv("OPENAI_API_KEY")
return key
def get_provider_config(self, provider: str) -> Dict[str, Any]:
"""Get complete configuration for a provider"""
return self.get(f"providers.{provider}", {})

View File

@@ -1 +0,0 @@
"""Documentation management module for ai.gpt."""

View File

@@ -1,150 +0,0 @@
"""Configuration management for documentation system."""
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Field
class GitConfig(BaseModel):
"""Git configuration."""
host: str = "git.syui.ai"
protocol: str = "ssh"
class AtprotoConfig(BaseModel):
"""Atproto configuration."""
host: str = "syu.is"
protocol: str = "at"
at_url: str = "at://ai.syu.is"
did: str = "did:plc:6qyecktefllvenje24fcxnie"
web: str = "https://web.syu.is/@ai"
class ProjectMetadata(BaseModel):
"""Project metadata."""
last_updated: str
structure_version: str
domain: List[str]
git: GitConfig
atproto: AtprotoConfig
class ProjectInfo(BaseModel):
"""Individual project information."""
type: Union[str, List[str]] # Support both string and list
text: str
status: str
branch: str = "main"
git_url: Optional[str] = None
detailed_specs: Optional[str] = None
data_reference: Optional[str] = None
features: Optional[str] = None
class AIConfig(BaseModel):
"""AI projects configuration."""
ai: ProjectInfo
gpt: ProjectInfo
os: ProjectInfo
game: ProjectInfo
bot: ProjectInfo
moji: ProjectInfo
card: ProjectInfo
api: ProjectInfo
log: ProjectInfo
verse: ProjectInfo
shell: ProjectInfo
class DocsConfig(BaseModel):
"""Main documentation configuration model."""
version: int = 2
metadata: ProjectMetadata
ai: AIConfig
data: Dict[str, Any] = Field(default_factory=dict)
deprecated: Dict[str, Any] = Field(default_factory=dict)
@classmethod
def load_from_file(cls, config_path: Path) -> "DocsConfig":
"""Load configuration from ai.json file."""
if not config_path.exists():
raise FileNotFoundError(f"Configuration file not found: {config_path}")
with open(config_path, "r", encoding="utf-8") as f:
data = json.load(f)
return cls(**data)
def get_project_info(self, project_name: str) -> Optional[ProjectInfo]:
"""Get project information by name."""
return getattr(self.ai, project_name, None)
def get_project_git_url(self, project_name: str) -> str:
"""Get git URL for project."""
project = self.get_project_info(project_name)
if project and project.git_url:
return project.git_url
# Construct URL from metadata
host = self.metadata.git.host
protocol = self.metadata.git.protocol
if protocol == "ssh":
return f"git@{host}:ai/{project_name}"
else:
return f"https://{host}/ai/{project_name}"
def get_project_branch(self, project_name: str) -> str:
"""Get branch for project."""
project = self.get_project_info(project_name)
return project.branch if project else "main"
def list_projects(self) -> List[str]:
"""List all available projects."""
return list(AIConfig.model_fields.keys())  # pydantic v2; __fields__ is deprecated
def get_ai_root(custom_dir: Optional[Path] = None) -> Path:
"""Get AI ecosystem root directory.
Priority order:
1. --dir option (custom_dir parameter)
2. AI_DOCS_DIR environment variable
3. ai.gpt config file (docs.ai_root)
4. Default relative path
"""
if custom_dir:
return custom_dir
# Check environment variable
import os
env_dir = os.getenv("AI_DOCS_DIR")
if env_dir:
return Path(env_dir)
# Check ai.gpt config file
try:
from ..config import Config
config = Config()
config_ai_root = config.get("docs.ai_root")
if config_ai_root:
return Path(config_ai_root).expanduser()
except Exception:
# If config loading fails, continue to default
pass
# Default: From gpt/src/aigpt/docs/config.py, go up to ai/ root
return Path(__file__).parent.parent.parent.parent.parent
def get_claude_root(custom_dir: Optional[Path] = None) -> Path:
"""Get Claude documentation root directory."""
return get_ai_root(custom_dir) / "claude"
def load_docs_config(custom_dir: Optional[Path] = None) -> DocsConfig:
"""Load documentation configuration."""
config_path = get_ai_root(custom_dir) / "ai.json"
return DocsConfig.load_from_file(config_path)
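# Minimal sketch of an ai.json accepted by load_docs_config() (values are
# illustrative; the "ai" section needs one entry per field of AIConfig, the
# rest elided here):
#
#   {
#     "version": 2,
#     "metadata": {
#       "last_updated": "2025-06-08",
#       "structure_version": "2",
#       "domain": ["syui.ai"],
#       "git": {"host": "git.syui.ai", "protocol": "ssh"},
#       "atproto": {}
#     },
#     "ai": {
#       "ai": {"type": "ai", "text": "...", "status": "active"},
#       "gpt": {"type": "ai", "text": "...", "status": "active", "branch": "main"}
#     }
#   }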

View File

@@ -1,397 +0,0 @@
"""Git utilities for documentation management."""
import subprocess
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from rich.progress import track
from .utils import run_command
console = Console()
def check_git_repository(path: Path) -> bool:
"""Check if path is a git repository."""
return (path / ".git").exists()
def get_submodules_status(repo_path: Path) -> List[dict]:
"""Get status of all submodules."""
if not check_git_repository(repo_path):
return []
returncode, stdout, stderr = run_command(
["git", "submodule", "status"],
cwd=repo_path
)
if returncode != 0:
return []
submodules = []
for line in stdout.strip().splitlines():
if line.strip():
# Parse git submodule status output
# Format: " commit_hash path (tag)" or "-commit_hash path" (not initialized)
parts = line.strip().split()
if len(parts) >= 2:
status_char = line[0] if line else ' '
commit = parts[0].lstrip('-+ ')
path = parts[1]
submodules.append({
"path": path,
"commit": commit,
"initialized": status_char != '-',
"modified": status_char == '+',
"status": status_char
})
return submodules
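# Example of the `git submodule status` lines this parses (hashes are
# hypothetical):
#
#   " a1b2c3d gpt (heads/main)"  -> initialized, clean    (status " ")
#   "+e4f5a6b card (heads/main)" -> initialized, modified (status "+")
#   "-0011223 verse"             -> not initialized       (status "-")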
def init_and_update_submodules(repo_path: Path, specific_paths: Optional[List[str]] = None) -> Tuple[bool, str]:
"""Initialize and update submodules."""
if not check_git_repository(repo_path):
return False, "Not a git repository"
try:
# Initialize submodules
console.print("[blue]🔧 Initializing submodules...[/blue]")
returncode, stdout, stderr = run_command(
["git", "submodule", "init"],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to initialize submodules: {stderr}"
# Update submodules
console.print("[blue]📦 Updating submodules...[/blue]")
if specific_paths:
# Update specific submodules
for path in specific_paths:
console.print(f"[dim]Updating {path}...[/dim]")
returncode, stdout, stderr = run_command(
["git", "submodule", "update", "--init", "--recursive", path],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to update submodule {path}: {stderr}"
else:
# Update all submodules
returncode, stdout, stderr = run_command(
["git", "submodule", "update", "--init", "--recursive"],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to update submodules: {stderr}"
console.print("[green]✅ Submodules updated successfully[/green]")
return True, "Submodules updated successfully"
except Exception as e:
return False, f"Error updating submodules: {str(e)}"
def clone_missing_submodules(repo_path: Path, ai_config) -> Tuple[bool, List[str]]:
"""Clone missing submodules based on ai.json configuration."""
if not check_git_repository(repo_path):
return False, ["Not a git repository"]
try:
# Get current submodules
current_submodules = get_submodules_status(repo_path)
current_paths = {sub["path"] for sub in current_submodules}
# Get expected projects from ai.json
expected_projects = ai_config.list_projects()
# Find missing submodules
missing_submodules = []
for project in expected_projects:
if project not in current_paths:
# Check if directory exists but is not a submodule
project_path = repo_path / project
if not project_path.exists():
missing_submodules.append(project)
if not missing_submodules:
console.print("[green]✅ All submodules are present[/green]")
return True, []
console.print(f"[yellow]📋 Found {len(missing_submodules)} missing submodules: {missing_submodules}[/yellow]")
# Clone missing submodules
cloned = []
for project in track(missing_submodules, description="Cloning missing submodules..."):
git_url = ai_config.get_project_git_url(project)
branch = ai_config.get_project_branch(project)
console.print(f"[blue]📦 Adding submodule: {project}[/blue]")
console.print(f"[dim]URL: {git_url}[/dim]")
console.print(f"[dim]Branch: {branch}[/dim]")
returncode, stdout, stderr = run_command(
["git", "submodule", "add", "-b", branch, git_url, project],
cwd=repo_path
)
if returncode == 0:
cloned.append(project)
console.print(f"[green]✅ Added {project}[/green]")
else:
console.print(f"[red]❌ Failed to add {project}: {stderr}[/red]")
if cloned:
console.print(f"[green]🎉 Successfully cloned {len(cloned)} submodules[/green]")
return True, cloned
except Exception as e:
return False, [f"Error cloning submodules: {str(e)}"]
def ensure_submodules_available(repo_path: Path, ai_config, auto_clone: bool = True) -> Tuple[bool, List[str]]:
"""Ensure all submodules are available, optionally cloning missing ones."""
console.print("[blue]🔍 Checking submodule status...[/blue]")
# Get current submodule status
submodules = get_submodules_status(repo_path)
# Check for uninitialized submodules
uninitialized = [sub for sub in submodules if not sub["initialized"]]
if uninitialized:
console.print(f"[yellow]📦 Found {len(uninitialized)} uninitialized submodules[/yellow]")
if auto_clone:
success, message = init_and_update_submodules(
repo_path,
[sub["path"] for sub in uninitialized]
)
if not success:
return False, [message]
else:
return False, [f"Uninitialized submodules: {[sub['path'] for sub in uninitialized]}"]
# Check for missing submodules (not in .gitmodules but expected)
if auto_clone:
success, cloned = clone_missing_submodules(repo_path, ai_config)
if not success:
return False, cloned
# If we cloned new submodules, update all to be safe
if cloned:
success, message = init_and_update_submodules(repo_path)
if not success:
return False, [message]
return True, []
def get_git_branch(repo_path: Path) -> Optional[str]:
"""Get current git branch."""
if not check_git_repository(repo_path):
return None
returncode, stdout, stderr = run_command(
["git", "branch", "--show-current"],
cwd=repo_path
)
if returncode == 0:
return stdout.strip()
return None
def get_git_remote_url(repo_path: Path, remote: str = "origin") -> Optional[str]:
"""Get git remote URL."""
if not check_git_repository(repo_path):
return None
returncode, stdout, stderr = run_command(
["git", "remote", "get-url", remote],
cwd=repo_path
)
if returncode == 0:
return stdout.strip()
return None
def pull_repository(repo_path: Path, branch: Optional[str] = None) -> Tuple[bool, str]:
"""Pull latest changes from remote repository."""
if not check_git_repository(repo_path):
return False, "Not a git repository"
try:
# Get current branch if not specified
if branch is None:
branch = get_git_branch(repo_path)
if not branch:
# If in detached HEAD state, try to switch to main
console.print("[yellow]⚠️ Repository in detached HEAD state, switching to main...[/yellow]")
returncode, stdout, stderr = run_command(
["git", "checkout", "main"],
cwd=repo_path
)
if returncode == 0:
branch = "main"
console.print("[green]✅ Switched to main branch[/green]")
else:
return False, f"Could not switch to main branch: {stderr}"
console.print(f"[blue]📥 Pulling latest changes for branch: {branch}[/blue]")
# Check if we have uncommitted changes
returncode, stdout, stderr = run_command(
["git", "status", "--porcelain"],
cwd=repo_path
)
if returncode == 0 and stdout.strip():
console.print("[yellow]⚠️ Repository has uncommitted changes[/yellow]")
console.print("[dim]Consider committing changes before pull[/dim]")
# Continue anyway, git will handle conflicts
# Fetch latest changes
console.print("[dim]Fetching from remote...[/dim]")
returncode, stdout, stderr = run_command(
["git", "fetch", "origin"],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to fetch: {stderr}"
# Pull changes
returncode, stdout, stderr = run_command(
["git", "pull", "origin", branch],
cwd=repo_path
)
if returncode != 0:
# Check if it's a merge conflict
if "CONFLICT" in stderr or "conflict" in stderr.lower():
return False, f"Merge conflicts detected: {stderr}"
return False, f"Failed to pull: {stderr}"
# Check if there were any changes
if "Already up to date" in stdout or "Already up-to-date" in stdout:
console.print("[green]✅ Repository already up to date[/green]")
else:
console.print("[green]✅ Successfully pulled latest changes[/green]")
if stdout.strip():
console.print(f"[dim]{stdout.strip()}[/dim]")
return True, "Successfully pulled latest changes"
except Exception as e:
return False, f"Error pulling repository: {str(e)}"
def pull_wiki_repository(wiki_path: Path) -> Tuple[bool, str]:
"""Pull latest changes from wiki repository before generating content."""
if not wiki_path.exists():
return False, f"Wiki directory not found: {wiki_path}"
if not check_git_repository(wiki_path):
return False, f"Wiki directory is not a git repository: {wiki_path}"
console.print(f"[blue]📚 Updating wiki repository: {wiki_path.name}[/blue]")
return pull_repository(wiki_path)
def push_repository(repo_path: Path, branch: Optional[str] = None, commit_message: Optional[str] = None) -> Tuple[bool, str]:
"""Commit and push changes to remote repository."""
if not check_git_repository(repo_path):
return False, "Not a git repository"
try:
# Get current branch if not specified
if branch is None:
branch = get_git_branch(repo_path)
if not branch:
return False, "Could not determine current branch"
# Check if we have any changes to commit
returncode, stdout, stderr = run_command(
["git", "status", "--porcelain"],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to check git status: {stderr}"
if not stdout.strip():
console.print("[green]✅ No changes to commit[/green]")
return True, "No changes to commit"
console.print(f"[blue]📝 Committing changes in: {repo_path.name}[/blue]")
# Add all changes
returncode, stdout, stderr = run_command(
["git", "add", "."],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to add changes: {stderr}"
# Commit changes
if commit_message is None:
commit_message = f"Update wiki content - {Path().cwd().name} documentation sync"
returncode, stdout, stderr = run_command(
["git", "commit", "-m", commit_message],
cwd=repo_path
)
if returncode != 0:
# Check if there were no changes to commit
if "nothing to commit" in stderr or "nothing added to commit" in stderr:
console.print("[green]✅ No changes to commit[/green]")
return True, "No changes to commit"
return False, f"Failed to commit changes: {stderr}"
console.print(f"[blue]📤 Pushing to remote branch: {branch}[/blue]")
# Push to remote
returncode, stdout, stderr = run_command(
["git", "push", "origin", branch],
cwd=repo_path
)
if returncode != 0:
return False, f"Failed to push: {stderr}"
console.print("[green]✅ Successfully pushed changes to remote[/green]")
if stdout.strip():
console.print(f"[dim]{stdout.strip()}[/dim]")
return True, "Successfully committed and pushed changes"
except Exception as e:
return False, f"Error pushing repository: {str(e)}"
def push_wiki_repository(wiki_path: Path, commit_message: Optional[str] = None) -> Tuple[bool, str]:
"""Commit and push changes to wiki repository after generating content."""
if not wiki_path.exists():
return False, f"Wiki directory not found: {wiki_path}"
if not check_git_repository(wiki_path):
return False, f"Wiki directory is not a git repository: {wiki_path}"
console.print(f"[blue]📚 Pushing wiki repository: {wiki_path.name}[/blue]")
if commit_message is None:
commit_message = "Auto-update wiki content from ai.gpt docs"
return push_repository(wiki_path, branch="main", commit_message=commit_message)

View File

@@ -1,158 +0,0 @@
"""Template management for documentation generation."""
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from jinja2 import Environment, FileSystemLoader
from .config import DocsConfig, get_claude_root
class DocumentationTemplateManager:
"""Manages Jinja2 templates for documentation generation."""
def __init__(self, config: DocsConfig):
self.config = config
self.claude_root = get_claude_root()
self.templates_dir = self.claude_root / "templates"
self.core_dir = self.claude_root / "core"
self.projects_dir = self.claude_root / "projects"
# Setup Jinja2 environment
self.env = Environment(
loader=FileSystemLoader([
str(self.templates_dir),
str(self.core_dir),
str(self.projects_dir),
]),
trim_blocks=True,
lstrip_blocks=True,
)
# Add custom filters
self.env.filters["timestamp"] = self._timestamp_filter
def _timestamp_filter(self, format_str: str = "%Y-%m-%d %H:%M:%S") -> str:
"""Jinja2 filter for timestamps."""
return datetime.now().strftime(format_str)
def get_template_context(self, project_name: str, components: List[str]) -> Dict:
"""Get template context for documentation generation."""
project_info = self.config.get_project_info(project_name)
return {
"config": self.config,
"project_name": project_name,
"project_info": project_info,
"components": components,
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"ai_md_content": self._get_ai_md_content(),
}
def _get_ai_md_content(self) -> Optional[str]:
"""Get content from ai.md file."""
ai_md_path = self.claude_root.parent / "ai.md"
if ai_md_path.exists():
return ai_md_path.read_text(encoding="utf-8")
return None
def render_component(self, component_name: str, context: Dict) -> str:
"""Render a specific component."""
component_files = {
"core": ["philosophy.md", "naming.md", "architecture.md"],
"philosophy": ["philosophy.md"],
"naming": ["naming.md"],
"architecture": ["architecture.md"],
"specific": [f"{context['project_name']}.md"],
}
if component_name not in component_files:
raise ValueError(f"Unknown component: {component_name}")
content_parts = []
for file_name in component_files[component_name]:
file_path = self.core_dir / file_name
if component_name == "specific":
file_path = self.projects_dir / file_name
if file_path.exists():
content = file_path.read_text(encoding="utf-8")
content_parts.append(content)
return "\n\n".join(content_parts)
def generate_documentation(
self,
project_name: str,
components: List[str],
output_path: Optional[Path] = None,
) -> str:
"""Generate complete documentation."""
context = self.get_template_context(project_name, components)
# Build content sections
content_sections = []
# Add ai.md header if available
if context["ai_md_content"]:
content_sections.append(context["ai_md_content"])
content_sections.append("---\n")
# Add title and metadata
content_sections.append("# エコシステム統合設計書(詳細版)\n")
content_sections.append("このドキュメントは動的生成されました。修正は元ファイルで行ってください。\n")
content_sections.append(f"生成日時: {context['timestamp']}")
content_sections.append(f"対象プロジェクト: {project_name}")
content_sections.append(f"含有コンポーネント: {','.join(components)}\n")
# Add component content
for component in components:
try:
component_content = self.render_component(component, context)
if component_content.strip():
content_sections.append(component_content)
except ValueError as e:
print(f"Warning: {e}")
# Add footer
footer = """
# footer
© syui
# important-instruction-reminders
Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.
ALWAYS prefer editing an existing file to creating a new one.
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
"""
content_sections.append(footer)
# Join all sections
final_content = "\n".join(content_sections)
# Write to file if output path provided
if output_path:
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(final_content, encoding="utf-8")
return final_content
def list_available_components(self) -> List[str]:
"""List available components."""
return ["core", "philosophy", "naming", "architecture", "specific"]
def validate_components(self, components: List[str]) -> List[str]:
"""Validate and return valid components."""
available = self.list_available_components()
valid_components = []
for component in components:
if component in available:
valid_components.append(component)
else:
print(f"Warning: Unknown component '{component}' (available: {available})")
return valid_components or ["core", "specific"] # Default fallback

View File

@@ -1,178 +0,0 @@
"""Utility functions for documentation management."""
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn
console = Console()
def run_command(
cmd: List[str],
cwd: Optional[Path] = None,
capture_output: bool = True,
verbose: bool = False,
) -> Tuple[int, str, str]:
"""Run a command and return exit code, stdout, stderr."""
if verbose:
console.print(f"[dim]Running: {' '.join(cmd)}[/dim]")
try:
result = subprocess.run(
cmd,
cwd=cwd,
capture_output=capture_output,
text=True,
check=False,
)
return result.returncode, result.stdout, result.stderr
except FileNotFoundError:
return 1, "", f"Command not found: {cmd[0]}"
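# Usage sketch:
#
#   returncode, stdout, stderr = run_command(
#       ["git", "status", "--porcelain"], cwd=Path(".")
#   )
#   if returncode == 0 and not stdout.strip():
#       console.print("working tree clean")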
def is_git_repository(path: Path) -> bool:
"""Check if path is a git repository."""
return (path / ".git").exists()
def get_git_status(repo_path: Path) -> Tuple[bool, List[str]]:
"""Get git status for repository."""
if not is_git_repository(repo_path):
return False, ["Not a git repository"]
returncode, stdout, stderr = run_command(
["git", "status", "--porcelain"],
cwd=repo_path
)
if returncode != 0:
return False, [stderr.strip()]
changes = [line.strip() for line in stdout.splitlines() if line.strip()]
return len(changes) == 0, changes
def validate_project_name(project_name: str, available_projects: List[str]) -> bool:
"""Validate project name against available projects."""
return project_name in available_projects
def format_file_size(size_bytes: int) -> str:
"""Format file size in human readable format."""
for unit in ['B', 'KB', 'MB', 'GB']:
if size_bytes < 1024.0:
return f"{size_bytes:.1f}{unit}"
size_bytes /= 1024.0
return f"{size_bytes:.1f}TB"
def count_lines(file_path: Path) -> int:
"""Count lines in a file."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
return sum(1 for _ in f)
except (OSError, UnicodeDecodeError):
return 0
def find_project_directories(base_path: Path, projects: List[str]) -> dict:
"""Find project directories relative to base path."""
project_dirs = {}
# Look for directories matching project names
for project in projects:
project_path = base_path / project
if project_path.exists() and project_path.is_dir():
project_dirs[project] = project_path
return project_dirs
def check_command_available(command: str) -> bool:
"""Check if a command is available in PATH."""
try:
subprocess.run([command, "--version"],
capture_output=True,
check=True)
return True
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def get_platform_info() -> dict:
"""Get platform information."""
import platform
return {
"system": platform.system(),
"release": platform.release(),
"machine": platform.machine(),
"python_version": platform.python_version(),
"python_implementation": platform.python_implementation(),
}
class ProgressManager:
"""Context manager for rich progress bars."""
def __init__(self, description: str = "Processing..."):
self.description = description
self.progress = None
self.task = None
def __enter__(self):
self.progress = Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
console=console,
)
self.progress.start()
self.task = self.progress.add_task(self.description, total=None)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.progress:
self.progress.stop()
def update(self, description: str):
"""Update progress description."""
if self.progress and self.task is not None:
self.progress.update(self.task, description=description)
def safe_write_file(file_path: Path, content: str, backup: bool = True) -> bool:
"""Safely write content to file with optional backup."""
try:
# Create backup if file exists and backup requested
if backup and file_path.exists():
backup_path = file_path.with_suffix(file_path.suffix + ".bak")
backup_path.write_text(file_path.read_text(encoding="utf-8"), encoding="utf-8")
# Ensure parent directory exists
file_path.parent.mkdir(parents=True, exist_ok=True)
# Write content
file_path.write_text(content, encoding="utf-8")
return True
except (OSError, UnicodeError) as e:
console.print(f"[red]Error writing file {file_path}: {e}[/red]")
return False
def confirm_action(message: str, default: bool = False) -> bool:
"""Ask user for confirmation."""
if not sys.stdin.isatty():
return default
suffix = " [Y/n]: " if default else " [y/N]: "
response = input(message + suffix).strip().lower()
if not response:
return default
return response in ('y', 'yes', 'true', '1')

View File

@@ -1,314 +0,0 @@
"""Wiki generation utilities for ai.wiki management."""
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from rich.console import Console
from .config import DocsConfig, get_ai_root
from .utils import find_project_directories
from .git_utils import pull_wiki_repository, push_wiki_repository
console = Console()
class WikiGenerator:
"""Generates wiki content from project documentation."""
def __init__(self, config: DocsConfig, ai_root: Path):
self.config = config
self.ai_root = ai_root
self.wiki_root = ai_root / "ai.wiki" if (ai_root / "ai.wiki").exists() else None
def extract_project_summary(self, project_md_path: Path) -> Dict[str, str]:
"""Extract key information from claude/projects/${repo}.md file."""
if not project_md_path.exists():
return {"title": "No documentation", "summary": "Project documentation not found", "status": "Unknown"}
try:
content = project_md_path.read_text(encoding="utf-8")
# Extract title (first # heading)
title_match = re.search(r'^# (.+)$', content, re.MULTILINE)
title = title_match.group(1) if title_match else "Unknown Project"
# Extract project overview/summary (look for specific patterns)
summary = self._extract_summary_section(content)
# Extract status information
status = self._extract_status_info(content)
# Extract key features/goals
features = self._extract_features(content)
return {
"title": title,
"summary": summary,
"status": status,
"features": features,
"last_updated": self._get_last_updated_info(content)
}
except Exception as e:
console.print(f"[yellow]Warning: Failed to parse {project_md_path}: {e}[/yellow]")
return {"title": "Parse Error", "summary": str(e), "status": "Error"}
def _extract_summary_section(self, content: str) -> str:
"""Extract summary or overview section."""
# Look for common summary patterns
patterns = [
r'## 概要\s*\n(.*?)(?=\n##|\n#|\Z)',
r'## Overview\s*\n(.*?)(?=\n##|\n#|\Z)',
r'## プロジェクト概要\s*\n(.*?)(?=\n##|\n#|\Z)',
r'\*\*目的\*\*: (.+?)(?=\n|$)',
r'\*\*中核概念\*\*:\s*\n(.*?)(?=\n##|\n#|\Z)',
]
for pattern in patterns:
match = re.search(pattern, content, re.DOTALL | re.MULTILINE)
if match:
summary = match.group(1).strip()
# Clean up and truncate
summary = re.sub(r'\n+', ' ', summary)
summary = re.sub(r'\s+', ' ', summary)
return summary[:300] + "..." if len(summary) > 300 else summary
# Fallback: first paragraph after title
lines = content.split('\n')
summary_lines = []
found_content = False
for line in lines:
line = line.strip()
if not line:
if found_content and summary_lines:
break
continue
if line.startswith('#'):
found_content = True
continue
if found_content and not line.startswith('*') and not line.startswith('-'):
summary_lines.append(line)
if len(' '.join(summary_lines)) > 200:
break
return ' '.join(summary_lines)[:300] + "..." if summary_lines else "No summary available"
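# Example: for a project file containing
#
#   # ai.gpt
#   ## 概要
#   自律的送信AI。記憶と関係性に基づいて対話する。
#
# the "## 概要" pattern matches first and the summary becomes the overview
# text (truncated at 300 characters); the paragraph-scanning fallback only
# runs when none of the patterns match.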
def _extract_status_info(self, content: str) -> str:
"""Extract status information."""
# Look for status patterns
patterns = [
r'\*\*状況\*\*: (.+?)(?=\n|$)',
r'\*\*Status\*\*: (.+?)(?=\n|$)',
r'\*\*現在の状況\*\*: (.+?)(?=\n|$)',
r'- \*\*状況\*\*: (.+?)(?=\n|$)',
]
for pattern in patterns:
match = re.search(pattern, content)
if match:
return match.group(1).strip()
return "No status information"
def _extract_features(self, content: str) -> List[str]:
"""Extract key features or bullet points."""
features = []
# Look for bullet point lists
lines = content.split('\n')
in_list = False
for line in lines:
line = line.strip()
if line.startswith('- ') or line.startswith('* '):
feature = line[2:].strip()
if len(feature) > 10 and not feature.startswith('**'): # Skip metadata
features.append(feature)
in_list = True
if len(features) >= 5: # Limit to 5 features
break
elif in_list and not line:
break
return features
def _get_last_updated_info(self, content: str) -> str:
"""Extract last updated information."""
patterns = [
r'生成日時: (.+?)(?=\n|$)',
r'最終更新: (.+?)(?=\n|$)',
r'Last updated: (.+?)(?=\n|$)',
]
for pattern in patterns:
match = re.search(pattern, content)
if match:
return match.group(1).strip()
return "Unknown"
def generate_project_wiki_page(self, project_name: str, project_info: Dict[str, str]) -> str:
"""Generate wiki page for a single project."""
config_info = self.config.get_project_info(project_name)
content = f"""# {project_name}
## 概要
{project_info['summary']}
## プロジェクト情報
- **タイプ**: {config_info.type if config_info else 'Unknown'}
- **説明**: {config_info.text if config_info else 'No description'}
- **ステータス**: {config_info.status if config_info else project_info.get('status', 'Unknown')}
- **ブランチ**: {config_info.branch if config_info else 'main'}
- **最終更新**: {project_info.get('last_updated', 'Unknown')}
## 主な機能・特徴
"""
features = project_info.get('features', [])
if features:
for feature in features:
content += f"- {feature}\n"
else:
content += "- 情報なし\n"
content += f"""
## リンク
- **Repository**: https://git.syui.ai/ai/{project_name}
- **Project Documentation**: [claude/projects/{project_name}.md](https://git.syui.ai/ai/ai/src/branch/main/claude/projects/{project_name}.md)
- **Generated Documentation**: [{project_name}/claude.md](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md)
---
*このページは claude/projects/{project_name}.md から自動生成されました*
"""
return content
def generate_wiki_home_page(self, project_summaries: Dict[str, Dict[str, str]]) -> str:
"""Generate the main Home.md page with all project summaries."""
content = """# AI Ecosystem Wiki
AI生態系プロジェクトの概要とドキュメント集約ページです。
## プロジェクト一覧
"""
# Group projects by type
project_groups = {}
for project_name, info in project_summaries.items():
config_info = self.config.get_project_info(project_name)
project_type = config_info.type if config_info else 'other'
if isinstance(project_type, list):
project_type = project_type[0] # Use first type
if project_type not in project_groups:
project_groups[project_type] = []
project_groups[project_type].append((project_name, info))
# Generate sections by type
type_names = {
'ai': '🧠 AI・知能システム',
'gpt': '🤖 自律・対話システム',
'os': '💻 システム・基盤',
'card': '🎮 ゲーム・エンターテイメント',
'shell': '⚡ ツール・ユーティリティ',
'other': '📦 その他'
}
for project_type, projects in project_groups.items():
type_display = type_names.get(project_type, f'📁 {project_type}')
content += f"### {type_display}\n\n"
for project_name, info in projects:
content += f"#### [{project_name}](auto/{project_name}.md)\n"
content += f"{info['summary'][:150]}{'...' if len(info['summary']) > 150 else ''}\n\n"
# Add quick status
config_info = self.config.get_project_info(project_name)
if config_info:
content += f"**Status**: {config_info.status} \n"
content += f"**Links**: [Repo](https://git.syui.ai/ai/{project_name}) | [Docs](https://git.syui.ai/ai/{project_name}/src/branch/main/claude.md)\n\n"
content += """
---
## ディレクトリ構成
- `auto/` - 自動生成されたプロジェクト概要
- `claude/` - Claude Code作業記録
- `manual/` - 手動作成ドキュメント
---
*このページは ai.json と claude/projects/ から自動生成されました*
*最終更新: {last_updated}*
""".format(last_updated=self._get_current_timestamp())
return content
def _get_current_timestamp(self) -> str:
"""Get current timestamp."""
from datetime import datetime
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def update_wiki_auto_directory(self, auto_pull: bool = True) -> Tuple[bool, List[str]]:
"""Update the auto/ directory with project summaries."""
if not self.wiki_root:
return False, ["ai.wiki directory not found"]
# Pull latest changes from wiki repository first
if auto_pull:
success, message = pull_wiki_repository(self.wiki_root)
if not success:
console.print(f"[yellow]⚠️ Wiki pull failed: {message}[/yellow]")
console.print("[dim]Continuing with local wiki update...[/dim]")
else:
console.print(f"[green]✅ Wiki repository updated[/green]")
auto_dir = self.wiki_root / "auto"
auto_dir.mkdir(exist_ok=True)
# Get claude/projects directory
claude_projects_dir = self.ai_root / "claude" / "projects"
if not claude_projects_dir.exists():
return False, [f"claude/projects directory not found: {claude_projects_dir}"]
project_summaries = {}
updated_files = []
console.print("[blue]📋 Extracting project summaries from claude/projects/...[/blue]")
# Process all projects from ai.json
for project_name in self.config.list_projects():
project_md_path = claude_projects_dir / f"{project_name}.md"
# Extract summary from claude/projects/${project}.md
project_info = self.extract_project_summary(project_md_path)
project_summaries[project_name] = project_info
# Generate individual project wiki page
wiki_content = self.generate_project_wiki_page(project_name, project_info)
wiki_file_path = auto_dir / f"{project_name}.md"
try:
wiki_file_path.write_text(wiki_content, encoding="utf-8")
updated_files.append(f"auto/{project_name}.md")
console.print(f"[green]✓ Generated auto/{project_name}.md[/green]")
except Exception as e:
console.print(f"[red]✗ Failed to write auto/{project_name}.md: {e}[/red]")
# Generate Home.md
try:
home_content = self.generate_wiki_home_page(project_summaries)
home_path = self.wiki_root / "Home.md"
home_path.write_text(home_content, encoding="utf-8")
updated_files.append("Home.md")
console.print(f"[green]✓ Generated Home.md[/green]")
except Exception as e:
console.print(f"[red]✗ Failed to write Home.md: {e}[/red]")
return True, updated_files

View File

@@ -1,118 +0,0 @@
"""AI Fortune system for daily personality variations"""
import json
import random
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Optional
import logging
from .models import AIFortune
class FortuneSystem:
"""Manages daily AI fortune affecting personality"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.fortune_file = data_dir / "fortunes.json"
self.fortunes: dict[str, AIFortune] = {}
self.logger = logging.getLogger(__name__)
self._load_fortunes()
def _load_fortunes(self):
"""Load fortune history from storage"""
if self.fortune_file.exists():
with open(self.fortune_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for date_str, fortune_data in data.items():
# Convert date string back to date object
fortune_data['date'] = datetime.fromisoformat(fortune_data['date']).date()
self.fortunes[date_str] = AIFortune(**fortune_data)
def _save_fortunes(self):
"""Save fortune history to storage"""
data = {}
for date_str, fortune in self.fortunes.items():
fortune_dict = fortune.model_dump(mode='json')
fortune_dict['date'] = fortune.date.isoformat()
data[date_str] = fortune_dict
with open(self.fortune_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
def get_today_fortune(self) -> AIFortune:
"""Get or generate today's fortune"""
today = date.today()
today_str = today.isoformat()
if today_str in self.fortunes:
return self.fortunes[today_str]
# Generate new fortune
fortune_value = random.randint(1, 10)
# Check yesterday's fortune for consecutive tracking
yesterday = (today - timedelta(days=1))
yesterday_str = yesterday.isoformat()
consecutive_good = 0
consecutive_bad = 0
breakthrough_triggered = False
if yesterday_str in self.fortunes:
yesterday_fortune = self.fortunes[yesterday_str]
if fortune_value >= 7: # Good fortune
if yesterday_fortune.fortune_value >= 7:
consecutive_good = yesterday_fortune.consecutive_good + 1
else:
consecutive_good = 1
elif fortune_value <= 3: # Bad fortune
if yesterday_fortune.fortune_value <= 3:
consecutive_bad = yesterday_fortune.consecutive_bad + 1
else:
consecutive_bad = 1
# Check breakthrough conditions
if consecutive_good >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive good fortunes!")
fortune_value = 10 # Max fortune on breakthrough
elif consecutive_bad >= 3:
breakthrough_triggered = True
self.logger.info("Breakthrough! 3 consecutive bad fortunes!")
fortune_value = random.randint(7, 10) # Good fortune after bad streak
fortune = AIFortune(
date=today,
fortune_value=fortune_value,
consecutive_good=consecutive_good,
consecutive_bad=consecutive_bad,
breakthrough_triggered=breakthrough_triggered
)
self.fortunes[today_str] = fortune
self._save_fortunes()
self.logger.info(f"Today's fortune: {fortune_value}/10")
return fortune
def get_personality_modifier(self, fortune: AIFortune) -> dict[str, float]:
"""Get personality modifiers based on fortune"""
base_modifier = fortune.fortune_value / 10.0
modifiers = {
"optimism": base_modifier,
"energy": base_modifier * 0.8,
"patience": 1.0 - (abs(5.5 - fortune.fortune_value) * 0.1),
"creativity": 0.5 + (base_modifier * 0.5),
"empathy": 0.7 + (base_modifier * 0.3)
}
# Breakthrough effects
if fortune.breakthrough_triggered:
modifiers["confidence"] = 1.0
modifiers["spontaneity"] = 0.9
return modifiers
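
A minimal usage sketch of the fortune system above; the aigpt.fortune module path and the temporary data directory are assumptions for illustration:

# Hypothetical usage sketch; the module path is an assumption
from pathlib import Path
from aigpt.fortune import FortuneSystem

data_dir = Path("/tmp/aigpt-demo")
data_dir.mkdir(parents=True, exist_ok=True)
fs = FortuneSystem(data_dir)
fortune = fs.get_today_fortune()                  # generated once per day, then cached in fortunes.json
modifiers = fs.get_personality_modifier(fortune)
print(f"fortune {fortune.fortune_value}/10, optimism {modifiers['optimism']:.2f}")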

File diff suppressed because it is too large

View File

@@ -1,146 +0,0 @@
"""Simple MCP Server implementation for ai.gpt"""
from mcp import Server
from mcp.types import Tool, TextContent
from pathlib import Path
from typing import Any, Dict, List, Optional
import json
from .persona import Persona
from .ai_provider import create_ai_provider
import subprocess
import os
def create_mcp_server(data_dir: Path, enable_card: bool = False) -> Server:
"""Create MCP server with ai.gpt tools"""
server = Server("aigpt")
persona = Persona(data_dir)
@server.tool()
async def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
"""Get active memories from the AI's memory system"""
memories = persona.memory.get_active_memories(limit=limit)
return [
{
"id": mem.id,
"content": mem.content,
"level": mem.level.value,
"importance": mem.importance_score,
"is_core": mem.is_core,
"timestamp": mem.timestamp.isoformat()
}
for mem in memories
]
@server.tool()
async def get_relationship(user_id: str) -> Dict[str, Any]:
"""Get relationship status with a specific user"""
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"user_id": rel.user_id,
"status": rel.status.value,
"score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"is_broken": rel.is_broken,
"total_interactions": rel.total_interactions,
"last_interaction": rel.last_interaction.isoformat() if rel.last_interaction else None
}
@server.tool()
async def process_interaction(user_id: str, message: str, provider: str = "ollama", model: str = "qwen2.5") -> Dict[str, Any]:
"""Process an interaction with a user"""
ai_provider = create_ai_provider(provider, model)
response, relationship_delta = persona.process_interaction(user_id, message, ai_provider)
rel = persona.relationships.get_or_create_relationship(user_id)
return {
"response": response,
"relationship_delta": relationship_delta,
"new_relationship_score": rel.score,
"transmission_enabled": rel.transmission_enabled,
"relationship_status": rel.status.value
}
@server.tool()
async def get_fortune() -> Dict[str, Any]:
"""Get today's AI fortune"""
fortune = persona.fortune_system.get_today_fortune()
modifiers = persona.fortune_system.get_personality_modifier(fortune)
return {
"value": fortune.fortune_value,
"date": fortune.date.isoformat(),
"consecutive_good": fortune.consecutive_good,
"consecutive_bad": fortune.consecutive_bad,
"breakthrough": fortune.breakthrough_triggered,
"personality_modifiers": modifiers
}
@server.tool()
async def execute_command(command: str, working_dir: str = ".") -> Dict[str, Any]:
"""Execute a shell command"""
try:
import shlex
result = subprocess.run(
shlex.split(command),
cwd=working_dir,
capture_output=True,
text=True,
timeout=60
)
return {
"status": "success" if result.returncode == 0 else "error",
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"command": command
}
except subprocess.TimeoutExpired:
return {"error": "Command timed out"}
except Exception as e:
return {"error": str(e)}
@server.tool()
async def analyze_file(file_path: str) -> Dict[str, Any]:
"""Analyze a file using AI"""
try:
if not os.path.exists(file_path):
return {"error": f"File not found: {file_path}"}
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
ai_provider = create_ai_provider("ollama", "qwen2.5")
prompt = f"Analyze this file and provide insights:\\n\\nFile: {file_path}\\n\\nContent:\\n{content[:2000]}"
analysis = ai_provider.generate_response(prompt, "You are a code analyst.")
return {
"analysis": analysis,
"file_path": file_path,
"file_size": len(content),
"line_count": len(content.split('\\n'))
}
except Exception as e:
return {"error": str(e)}
return server
async def main():
"""Run MCP server"""
import sys
from mcp import stdio_server
data_dir = Path.home() / ".config" / "syui" / "ai" / "gpt" / "data"
data_dir.mkdir(parents=True, exist_ok=True)
server = create_mcp_server(data_dir)
await stdio_server(server)
if __name__ == "__main__":
import asyncio
asyncio.run(main())

View File

@@ -1,408 +0,0 @@
"""Memory management system for ai.gpt"""
import json
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Any
import logging
from .models import Memory, MemoryLevel, Conversation
class MemoryManager:
"""Manages AI's memory with hierarchical storage and forgetting"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.memories_file = data_dir / "memories.json"
self.conversations_file = data_dir / "conversations.json"
self.memories: Dict[str, Memory] = {}
self.conversations: List[Conversation] = []
self.logger = logging.getLogger(__name__)
self._load_memories()
def _load_memories(self):
"""Load memories from persistent storage"""
if self.memories_file.exists():
with open(self.memories_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for mem_data in data:
memory = Memory(**mem_data)
self.memories[memory.id] = memory
if self.conversations_file.exists():
with open(self.conversations_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.conversations = [Conversation(**conv) for conv in data]
def _save_memories(self):
"""Save memories to persistent storage"""
memories_data = [mem.model_dump(mode='json') for mem in self.memories.values()]
with open(self.memories_file, 'w', encoding='utf-8') as f:
json.dump(memories_data, f, indent=2, default=str)
conv_data = [conv.model_dump(mode='json') for conv in self.conversations]
with open(self.conversations_file, 'w', encoding='utf-8') as f:
json.dump(conv_data, f, indent=2, default=str)
def add_conversation(self, conversation: Conversation) -> Memory:
"""Add a conversation and create memory from it"""
self.conversations.append(conversation)
# Create memory from conversation
memory_id = hashlib.sha256(
f"{conversation.id}{conversation.timestamp}".encode()
).hexdigest()[:16]
memory = Memory(
id=memory_id,
timestamp=conversation.timestamp,
content=f"User: {conversation.user_message}\nAI: {conversation.ai_response}",
level=MemoryLevel.FULL_LOG,
importance_score=abs(conversation.relationship_delta) * 0.1
)
self.memories[memory.id] = memory
self._save_memories()
return memory
def add_memory(self, memory: Memory):
"""Add a memory directly to the system"""
self.memories[memory.id] = memory
self._save_memories()
def create_smart_summary(self, user_id: str, ai_provider=None) -> Optional[Memory]:
"""Create AI-powered thematic summary from recent memories"""
recent_memories = [
mem for mem in self.memories.values()
if mem.level == MemoryLevel.FULL_LOG
and (datetime.now() - mem.timestamp).days < 7
]
if len(recent_memories) < 5:
return None
# Sort by timestamp for chronological analysis
recent_memories.sort(key=lambda m: m.timestamp)
# Prepare conversation context for AI analysis
conversations_text = "\n\n".join([
f"[{mem.timestamp.strftime('%Y-%m-%d %H:%M')}] {mem.content}"
for mem in recent_memories
])
summary_prompt = f"""
Analyze these recent conversations and create a thematic summary focusing on:
1. Communication patterns and user preferences
2. Technical topics and problem-solving approaches
3. Relationship progression and trust level
4. Key recurring themes and interests
Conversations:
{conversations_text}
Create a concise summary (2-3 sentences) that captures the essence of this interaction period:
"""
try:
if ai_provider:
summary_content = ai_provider.chat(summary_prompt, max_tokens=200)
else:
# Fallback to pattern-based analysis
themes = self._extract_themes(recent_memories)
summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions with focus on technical discussions."
except Exception as e:
self.logger.warning(f"AI summary failed, using fallback: {e}")
themes = self._extract_themes(recent_memories)
summary_content = f"Themes: {', '.join(themes[:3])}. {len(recent_memories)} interactions."
summary_id = hashlib.sha256(
f"summary_{datetime.now().isoformat()}".encode()
).hexdigest()[:16]
summary = Memory(
id=summary_id,
timestamp=datetime.now(),
content=f"SUMMARY ({len(recent_memories)} conversations): {summary_content}",
summary=summary_content,
level=MemoryLevel.SUMMARY,
importance_score=0.6,
metadata={
"memory_count": len(recent_memories),
"time_span": f"{recent_memories[0].timestamp.date()} to {recent_memories[-1].timestamp.date()}",
"themes": self._extract_themes(recent_memories)[:5]
}
)
self.memories[summary.id] = summary
# Reduce importance of summarized memories
for mem in recent_memories:
mem.importance_score *= 0.8
self._save_memories()
return summary
def _extract_themes(self, memories: List[Memory]) -> List[str]:
"""Extract common themes from memory content"""
common_words = {}
for memory in memories:
# Simple keyword extraction
words = memory.content.lower().split()
for word in words:
if len(word) > 4 and word.isalpha():
common_words[word] = common_words.get(word, 0) + 1
# Return most frequent meaningful words
return sorted(common_words.keys(), key=common_words.get, reverse=True)[:10]
def create_core_memory(self, ai_provider=None) -> Optional[Memory]:
"""Analyze all memories to extract core personality-forming elements"""
# Collect all non-forgotten memories for analysis
all_memories = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
if len(all_memories) < 10:
return None
# Sort by importance and timestamp for comprehensive analysis
all_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
# Prepare memory context for AI analysis
memory_context = "\n".join([
f"[{mem.level.value}] {mem.timestamp.strftime('%Y-%m-%d')}: {mem.content[:200]}..."
for mem in all_memories[:20] # Top 20 memories
])
core_prompt = f"""
Analyze these conversations and memories to identify core personality elements that define this user relationship:
1. Communication style and preferences
2. Core values and principles
3. Problem-solving patterns
4. Trust level and relationship depth
5. Unique characteristics that make this relationship special
Memories:
{memory_context}
Extract the essential personality-forming elements (2-3 sentences) that should NEVER be forgotten:
"""
try:
if ai_provider:
core_content = ai_provider.chat(core_prompt, max_tokens=150)
else:
# Fallback to pattern analysis
user_patterns = self._analyze_user_patterns(all_memories)
core_content = f"User shows {user_patterns['communication_style']} communication, focuses on {user_patterns['main_interests']}, and demonstrates {user_patterns['problem_solving']} approach."
except Exception as e:
self.logger.warning(f"AI core analysis failed, using fallback: {e}")
user_patterns = self._analyze_user_patterns(all_memories)
core_content = f"Core pattern: {user_patterns['communication_style']} style, {user_patterns['main_interests']} interests."
# Create core memory
core_id = hashlib.sha256(
f"core_{datetime.now().isoformat()}".encode()
).hexdigest()[:16]
core_memory = Memory(
id=core_id,
timestamp=datetime.now(),
content=f"CORE PERSONALITY: {core_content}",
summary=core_content,
level=MemoryLevel.CORE,
importance_score=1.0,
is_core=True,
metadata={
"source_memories": len(all_memories),
"analysis_date": datetime.now().isoformat(),
"patterns": self._analyze_user_patterns(all_memories)
}
)
self.memories[core_memory.id] = core_memory
self._save_memories()
self.logger.info(f"Core memory created: {core_id}")
return core_memory
def _analyze_user_patterns(self, memories: List[Memory]) -> Dict[str, str]:
"""Analyze patterns in user behavior from memories"""
# Extract patterns from conversation content
all_content = " ".join([mem.content.lower() for mem in memories])
# Simple pattern detection
communication_indicators = {
"technical": ["code", "implementation", "system", "api", "database"],
"casual": ["thanks", "please", "sorry", "help"],
"formal": ["could", "would", "should", "proper"]
}
problem_solving_indicators = {
"systematic": ["first", "then", "next", "step", "plan"],
"experimental": ["try", "test", "experiment", "see"],
"theoretical": ["concept", "design", "architecture", "pattern"]
}
# Score each pattern
communication_style = max(
communication_indicators.keys(),
key=lambda style: sum(all_content.count(word) for word in communication_indicators[style])
)
problem_solving = max(
problem_solving_indicators.keys(),
key=lambda style: sum(all_content.count(word) for word in problem_solving_indicators[style])
)
# Extract main interests from themes
themes = self._extract_themes(memories)
main_interests = ", ".join(themes[:3]) if themes else "general technology"
return {
"communication_style": communication_style,
"problem_solving": problem_solving,
"main_interests": main_interests,
"interaction_count": len(memories)
}
def identify_core_memories(self) -> List[Memory]:
"""Identify existing memories that should become core (legacy method)"""
core_candidates = [
mem for mem in self.memories.values()
if mem.importance_score > 0.8
and not mem.is_core
and mem.level != MemoryLevel.FORGOTTEN
]
for memory in core_candidates:
memory.is_core = True
memory.level = MemoryLevel.CORE
self.logger.info(f"Memory {memory.id} promoted to core")
self._save_memories()
return core_candidates
def apply_forgetting(self):
"""Apply selective forgetting based on importance and time"""
now = datetime.now()
for memory in self.memories.values():
if memory.is_core or memory.level == MemoryLevel.FORGOTTEN:
continue
# Time-based decay
age_days = (now - memory.timestamp).days
decay_factor = memory.decay_rate * age_days
memory.importance_score -= decay_factor
# Forget unimportant old memories
if memory.importance_score <= 0.1 and age_days > 30:
memory.level = MemoryLevel.FORGOTTEN
self.logger.info(f"Memory {memory.id} forgotten")
self._save_memories()
def get_active_memories(self, limit: int = 10) -> List[Memory]:
"""Get currently active memories for persona (legacy method)"""
active = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
# Sort by importance and recency
active.sort(
key=lambda m: (m.is_core, m.importance_score, m.timestamp),
reverse=True
)
return active[:limit]
def get_contextual_memories(self, query: str = "", limit: int = 10) -> Dict[str, List[Memory]]:
"""Get memories organized by priority with contextual relevance"""
all_memories = [
mem for mem in self.memories.values()
if mem.level != MemoryLevel.FORGOTTEN
]
# Categorize memories by type and importance
core_memories = [mem for mem in all_memories if mem.level == MemoryLevel.CORE]
summary_memories = [mem for mem in all_memories if mem.level == MemoryLevel.SUMMARY]
recent_memories = [
mem for mem in all_memories
if mem.level == MemoryLevel.FULL_LOG
and (datetime.now() - mem.timestamp).days < 3
]
# Apply keyword relevance if query provided
if query:
query_lower = query.lower()
def relevance_score(memory: Memory) -> float:
content_score = 1 if query_lower in memory.content.lower() else 0
summary_score = 1 if memory.summary and query_lower in memory.summary.lower() else 0
metadata_score = 1 if any(
query_lower in str(v).lower()
for v in (memory.metadata or {}).values()
) else 0
return content_score + summary_score + metadata_score
# Re-rank by relevance while maintaining type priority
core_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
summary_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
recent_memories.sort(key=lambda m: (relevance_score(m), m.importance_score), reverse=True)
else:
# Sort by importance and recency
core_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
summary_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
recent_memories.sort(key=lambda m: (m.importance_score, m.timestamp), reverse=True)
# Return organized memory structure
return {
"core": core_memories[:3], # Always include top core memories
"summary": summary_memories[:3], # Recent summaries
"recent": recent_memories[:limit-6], # Fill remaining with recent
"all_active": all_memories[:limit] # Fallback for simple access
}
def search_memories(self, keywords: List[str], memory_types: List[MemoryLevel] = None) -> List[Memory]:
"""Search memories by keywords and optionally filter by memory types"""
if memory_types is None:
memory_types = [MemoryLevel.CORE, MemoryLevel.SUMMARY, MemoryLevel.FULL_LOG]
matching_memories = []
for memory in self.memories.values():
if memory.level not in memory_types or memory.level == MemoryLevel.FORGOTTEN:
continue
# Check if any keyword matches in content, summary, or metadata
content_text = f"{memory.content} {memory.summary or ''}"
if memory.metadata:
content_text += " " + " ".join(str(v) for v in memory.metadata.values())
content_lower = content_text.lower()
# Score by keyword matches
match_score = sum(
keyword.lower() in content_lower
for keyword in keywords
)
if match_score > 0:
# Add match score to memory for sorting
memory_copy = memory.model_copy()
memory_copy.importance_score += match_score * 0.1
matching_memories.append(memory_copy)
# Sort by relevance (match score + importance + core status)
matching_memories.sort(
key=lambda m: (m.is_core, m.importance_score, m.timestamp),
reverse=True
)
return matching_memories
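
A hedged sketch of the memory lifecycle above; the module paths (aigpt.memory, aigpt.models) and demo identifiers are assumptions:

# Hypothetical usage sketch; module paths are assumptions
from datetime import datetime
from pathlib import Path
from aigpt.memory import MemoryManager
from aigpt.models import Conversation

mm = MemoryManager(Path("/tmp/aigpt-demo"))
conv = Conversation(
    id="demo_1",
    user_id="did:plc:example",
    timestamp=datetime.now(),
    user_message="How does forgetting work here?",
    ai_response="Low-importance memories decay and are eventually marked FORGOTTEN.",
    relationship_delta=1.0,
)
memory = mm.add_conversation(conv)   # persists to memories.json and conversations.json
groups = mm.get_contextual_memories(query="forgetting", limit=10)
print(memory.id, len(groups["recent"]))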

View File

@@ -1,88 +0,0 @@
"""Data models for ai.gpt system"""
from datetime import datetime, date
from typing import Optional, Dict, List, Any
from enum import Enum
from pydantic import BaseModel, Field, field_validator
class MemoryLevel(str, Enum):
"""Memory importance levels"""
FULL_LOG = "full_log"
SUMMARY = "summary"
CORE = "core"
FORGOTTEN = "forgotten"
class RelationshipStatus(str, Enum):
"""Relationship status levels"""
STRANGER = "stranger"
ACQUAINTANCE = "acquaintance"
FRIEND = "friend"
CLOSE_FRIEND = "close_friend"
BROKEN = "broken" # 不可逆
class Memory(BaseModel):
"""Single memory unit"""
id: str
timestamp: datetime
content: str
summary: Optional[str] = None
level: MemoryLevel = MemoryLevel.FULL_LOG
importance_score: float
is_core: bool = False
decay_rate: float = 0.01
metadata: Optional[Dict[str, Any]] = None
@field_validator('importance_score')
@classmethod
def validate_importance_score(cls, v):
"""Ensure importance_score is within valid range, handle floating point precision issues"""
if abs(v) < 1e-10: # Very close to zero
return 0.0
return max(0.0, min(1.0, v))
class Relationship(BaseModel):
"""Relationship with a specific user"""
user_id: str # atproto DID
status: RelationshipStatus = RelationshipStatus.STRANGER
score: float = 0.0
daily_interactions: int = 0
total_interactions: int = 0
last_interaction: Optional[datetime] = None
transmission_enabled: bool = False
threshold: float = 100.0
decay_rate: float = 0.1
daily_limit: int = 10
is_broken: bool = False
class AIFortune(BaseModel):
"""Daily AI fortune affecting personality"""
date: date
fortune_value: int = Field(ge=1, le=10)
consecutive_good: int = 0
consecutive_bad: int = 0
breakthrough_triggered: bool = False
class PersonaState(BaseModel):
"""Current persona state"""
base_personality: Dict[str, float]
current_mood: str
fortune: AIFortune
active_memories: List[str] # Memory IDs
relationship_modifiers: Dict[str, float]
class Conversation(BaseModel):
"""Conversation log entry"""
id: str
user_id: str
timestamp: datetime
user_message: str
ai_response: str
relationship_delta: float = 0.0
memory_created: bool = False
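
A small sketch of the validator behaviour on Memory; assumes aigpt.models as the import path:

# Hypothetical sketch; demonstrates importance_score clamping
from datetime import datetime
from aigpt.models import Memory, MemoryLevel

m = Memory(id="m1", timestamp=datetime.now(), content="hello", importance_score=1.7)
print(m.importance_score)  # 1.0 -- clamped to [0.0, 1.0] by validate_importance_score
print(m.level)             # MemoryLevel.FULL_LOG (default)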

View File

@@ -1,263 +0,0 @@
"""Persona management system integrating memory, relationships, and fortune"""
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
import logging
from .models import PersonaState, Conversation
from .memory import MemoryManager
from .relationship import RelationshipTracker
from .fortune import FortuneSystem
class Persona:
"""AI persona with unique characteristics based on interactions"""
def __init__(self, data_dir: Path, name: str = "ai"):
self.data_dir = data_dir
self.name = name
self.memory = MemoryManager(data_dir)
self.relationships = RelationshipTracker(data_dir)
self.fortune_system = FortuneSystem(data_dir)
self.logger = logging.getLogger(__name__)
# Base personality traits
self.base_personality = {
"curiosity": 0.7,
"empathy": 0.8,
"creativity": 0.6,
"patience": 0.7,
"optimism": 0.6
}
self.state_file = data_dir / "persona_state.json"
self._load_state()
def _load_state(self):
"""Load persona state from storage"""
if self.state_file.exists():
with open(self.state_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.base_personality = data.get("base_personality", self.base_personality)
def _save_state(self):
"""Save persona state to storage"""
state_data = {
"base_personality": self.base_personality,
"last_updated": datetime.now().isoformat()
}
with open(self.state_file, 'w', encoding='utf-8') as f:
json.dump(state_data, f, indent=2)
def get_current_state(self) -> PersonaState:
"""Get current persona state including all modifiers"""
# Get today's fortune
fortune = self.fortune_system.get_today_fortune()
fortune_modifiers = self.fortune_system.get_personality_modifier(fortune)
# Apply fortune modifiers to base personality
current_personality = {}
for trait, base_value in self.base_personality.items():
modifier = fortune_modifiers.get(trait, 1.0)
current_personality[trait] = min(1.0, base_value * modifier)
# Get active memories for context
active_memories = self.memory.get_active_memories(limit=5)
# Determine mood based on fortune and recent interactions
mood = self._determine_mood(fortune.fortune_value)
state = PersonaState(
base_personality=current_personality,
current_mood=mood,
fortune=fortune,
active_memories=[mem.id for mem in active_memories],
relationship_modifiers={}
)
return state
def _determine_mood(self, fortune_value: int) -> str:
"""Determine current mood based on fortune and other factors"""
if fortune_value >= 8:
return "joyful"
elif fortune_value >= 6:
return "cheerful"
elif fortune_value >= 4:
return "neutral"
elif fortune_value >= 2:
return "melancholic"
else:
return "contemplative"
def build_context_prompt(self, user_id: str, current_message: str) -> str:
"""Build context-aware prompt with relevant memories"""
# Get contextual memories based on current message
memory_groups = self.memory.get_contextual_memories(query=current_message, limit=8)
# Build context sections
context_parts = []
# Core personality elements (always included)
if memory_groups["core"]:
core_context = "\n".join([mem.content for mem in memory_groups["core"]])
context_parts.append(f"CORE PERSONALITY:\n{core_context}")
# Recent summaries for context
if memory_groups["summary"]:
summary_context = "\n".join([mem.summary or mem.content for mem in memory_groups["summary"]])
context_parts.append(f"RECENT PATTERNS:\n{summary_context}")
# Recent specific interactions
if memory_groups["recent"]:
recent_context = "\n".join([
f"[{mem.timestamp.strftime('%m-%d')}] {mem.content[:100]}..."
for mem in memory_groups["recent"][:3]
])
context_parts.append(f"RECENT INTERACTIONS:\n{recent_context}")
# Get current persona state
state = self.get_current_state()
# Build final prompt
context_prompt = f"""You are an AI with persistent memory and evolving relationships. Your current state:
PERSONALITY: {', '.join([f'{k}={v:.1f}' for k, v in state.base_personality.items()])}
MOOD: {state.current_mood}
FORTUNE: {state.fortune.fortune_value}/10
"""
if context_parts:
context_prompt += "RELEVANT CONTEXT:\n" + "\n\n".join(context_parts) + "\n\n"
context_prompt += f"""IMPORTANT: You have access to the following tools:
- Memory tools: get_memories, search_memories, get_contextual_memories
- Relationship tools: get_relationship
- Card game tools: card_get_user_cards, card_draw_card, card_analyze_collection
When asked about cards, collections, or anything card-related, YOU MUST use the card tools.
For "カードコレクションを見せて" or similar requests, use card_get_user_cards with did='{user_id}'.
Respond to this message while staying true to your personality and the established relationship context:
User: {current_message}
AI:"""
return context_prompt
def process_interaction(self, user_id: str, message: str, ai_provider=None) -> tuple[str, float]:
"""Process user interaction and generate response with enhanced context"""
# Get current state
state = self.get_current_state()
# Get relationship with user
relationship = self.relationships.get_or_create_relationship(user_id)
# Enhanced response generation with context awareness
if relationship.is_broken:
response = "..."
relationship_delta = 0.0
else:
if ai_provider:
# Build context-aware prompt
context_prompt = self.build_context_prompt(user_id, message)
# Generate response using AI with full context
try:
# Check if AI provider supports MCP
if hasattr(ai_provider, 'chat_with_mcp'):
import asyncio
response = asyncio.run(ai_provider.chat_with_mcp(context_prompt, max_tokens=2000, user_id=user_id))
else:
response = ai_provider.chat(context_prompt, max_tokens=2000)
# Clean up response if it includes the prompt echo
if "AI:" in response:
response = response.split("AI:")[-1].strip()
except Exception as e:
self.logger.error(f"AI response generation failed: {e}")
response = f"I appreciate your message about {message[:50]}..."
# Calculate relationship delta based on interaction quality and context
if state.current_mood in ["joyful", "cheerful"]:
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
relationship_delta = 1.5
else:
relationship_delta = 1.0
else:
# Context-aware fallback responses
memory_groups = self.memory.get_contextual_memories(query=message, limit=3)
if memory_groups["core"]:
# Reference core memories for continuity
response = f"Based on our relationship, I think {message.lower()} connects to what we've discussed before."
relationship_delta = 1.5
elif state.current_mood == "joyful":
response = f"What a wonderful day! {message} sounds interesting!"
relationship_delta = 2.0
elif relationship.status.value == "close_friend":
response = f"I've been thinking about our conversations. {message}"
relationship_delta = 1.5
else:
response = f"I understand. {message}"
relationship_delta = 1.0
# Create conversation record
conv_id = f"{user_id}_{datetime.now().timestamp()}"
conversation = Conversation(
id=conv_id,
user_id=user_id,
timestamp=datetime.now(),
user_message=message,
ai_response=response,
relationship_delta=relationship_delta,
memory_created=True
)
# Update memory
self.memory.add_conversation(conversation)
# Update relationship
self.relationships.update_interaction(user_id, relationship_delta)
return response, relationship_delta
def can_transmit_to(self, user_id: str) -> bool:
"""Check if AI can transmit messages to this user"""
relationship = self.relationships.get_or_create_relationship(user_id)
return relationship.transmission_enabled and not relationship.is_broken
def daily_maintenance(self):
"""Perform daily maintenance tasks"""
self.logger.info("Performing daily maintenance...")
# Apply time decay to relationships
self.relationships.apply_time_decay()
# Apply forgetting to memories
self.memory.apply_forgetting()
# Identify core memories
core_memories = self.memory.identify_core_memories()
if core_memories:
self.logger.info(f"Identified {len(core_memories)} new core memories")
# Create memory summaries
for user_id in self.relationships.relationships:
try:
from .ai_provider import create_ai_provider
ai_provider = create_ai_provider()
summary = self.memory.create_smart_summary(user_id, ai_provider=ai_provider)
if summary:
self.logger.info(f"Created smart summary for interactions with {user_id}")
except Exception as e:
self.logger.warning(f"Could not create AI summary for {user_id}: {e}")
self._save_state()
self.logger.info("Daily maintenance completed")

View File

@@ -1,321 +0,0 @@
"""Project management and continuous development logic for ai.shell"""
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Any
from datetime import datetime
import subprocess
import hashlib
from .models import Memory
from .ai_provider import AIProvider
class ProjectState:
"""プロジェクトの現在状態を追跡"""
def __init__(self, project_root: Path):
self.project_root = project_root
        self.files_state: Dict[str, str] = {}  # file path -> content hash
self.last_analysis: Optional[datetime] = None
self.project_context: Optional[str] = None
self.development_goals: List[str] = []
self.known_patterns: Dict[str, Any] = {}
def scan_project_files(self) -> Dict[str, str]:
"""プロジェクトファイルをスキャンしてハッシュ計算"""
current_state = {}
# 対象ファイル拡張子
target_extensions = {'.py', '.js', '.ts', '.rs', '.go', '.java', '.cpp', '.c', '.h'}
for file_path in self.project_root.rglob('*'):
if (file_path.is_file() and
file_path.suffix in target_extensions and
not any(part.startswith('.') for part in file_path.parts)):
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
file_hash = hashlib.md5(content.encode()).hexdigest()
relative_path = str(file_path.relative_to(self.project_root))
current_state[relative_path] = file_hash
except Exception:
continue
return current_state
def detect_changes(self) -> Dict[str, str]:
"""ファイル変更を検出"""
current_state = self.scan_project_files()
changes = {}
        # New or modified files
for path, current_hash in current_state.items():
if path not in self.files_state or self.files_state[path] != current_hash:
changes[path] = "modified" if path in self.files_state else "added"
        # Deleted files
for path in self.files_state:
if path not in current_state:
changes[path] = "deleted"
self.files_state = current_state
return changes
class ContinuousDeveloper:
"""Claude Code的な継続開発機能"""
def __init__(self, project_root: Path, ai_provider: Optional[AIProvider] = None):
self.project_root = project_root
self.ai_provider = ai_provider
self.project_state = ProjectState(project_root)
self.session_memory: List[str] = []
def load_project_context(self) -> str:
"""プロジェクト文脈を読み込み"""
context_files = [
"claude.md", "aishell.md", "README.md",
"pyproject.toml", "package.json", "Cargo.toml"
]
context_parts = []
for filename in context_files:
file_path = self.project_root / filename
if file_path.exists():
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
context_parts.append(f"## {filename}\n{content}")
except Exception:
continue
return "\n\n".join(context_parts)
def analyze_project_structure(self) -> Dict[str, Any]:
"""プロジェクト構造を分析"""
analysis = {
"language": self._detect_primary_language(),
"framework": self._detect_framework(),
"structure": self._analyze_file_structure(),
"dependencies": self._analyze_dependencies(),
"patterns": self._detect_code_patterns()
}
return analysis
def _detect_primary_language(self) -> str:
"""主要言語を検出"""
file_counts = {}
for file_path in self.project_root.rglob('*'):
if file_path.is_file() and file_path.suffix:
ext = file_path.suffix.lower()
file_counts[ext] = file_counts.get(ext, 0) + 1
language_map = {
'.py': 'Python',
'.js': 'JavaScript',
'.ts': 'TypeScript',
'.rs': 'Rust',
'.go': 'Go',
'.java': 'Java'
}
if file_counts:
primary_ext = max(file_counts.items(), key=lambda x: x[1])[0]
return language_map.get(primary_ext, 'Unknown')
return 'Unknown'
def _detect_framework(self) -> str:
"""フレームワークを検出"""
frameworks = {
'fastapi': ['fastapi', 'uvicorn'],
'django': ['django'],
'flask': ['flask'],
'react': ['react'],
'next.js': ['next'],
'rust-actix': ['actix-web'],
}
        # Check dependencies in pyproject.toml, package.json, and Cargo.toml
for config_file in ['pyproject.toml', 'package.json', 'Cargo.toml']:
config_path = self.project_root / config_file
if config_path.exists():
try:
with open(config_path, 'r') as f:
content = f.read().lower()
for framework, keywords in frameworks.items():
if any(keyword in content for keyword in keywords):
return framework
except Exception:
continue
return 'Unknown'
def _analyze_file_structure(self) -> Dict[str, List[str]]:
"""ファイル構造を分析"""
structure = {"directories": [], "key_files": []}
for item in self.project_root.iterdir():
if item.is_dir() and not item.name.startswith('.'):
structure["directories"].append(item.name)
elif item.is_file() and item.name in [
'main.py', 'app.py', 'index.js', 'main.rs', 'main.go'
]:
structure["key_files"].append(item.name)
return structure
def _analyze_dependencies(self) -> List[str]:
"""依存関係を分析"""
deps = []
# Python dependencies
pyproject = self.project_root / "pyproject.toml"
if pyproject.exists():
try:
with open(pyproject, 'r') as f:
content = f.read()
# Simple regex would be better but for now just check for common packages
common_packages = ['fastapi', 'pydantic', 'uvicorn', 'ollama', 'openai']
for package in common_packages:
if package in content:
deps.append(package)
except Exception:
pass
return deps
def _detect_code_patterns(self) -> Dict[str, int]:
"""コードパターンを検出"""
patterns = {
"classes": 0,
"functions": 0,
"api_endpoints": 0,
"async_functions": 0
}
for py_file in self.project_root.rglob('*.py'):
try:
with open(py_file, 'r', encoding='utf-8') as f:
content = f.read()
patterns["classes"] += content.count('class ')
patterns["functions"] += content.count('def ')
patterns["api_endpoints"] += content.count('@app.')
patterns["async_functions"] += content.count('async def')
except Exception:
continue
return patterns
def suggest_next_steps(self, current_task: Optional[str] = None) -> List[str]:
"""次のステップを提案"""
if not self.ai_provider:
return ["AI provider not available for suggestions"]
context = self.load_project_context()
analysis = self.analyze_project_structure()
changes = self.project_state.detect_changes()
prompt = f"""
Based on the project analysis, suggest 3-5 next development steps.
## Project context
{context[:1000]}
## Structure analysis
Language: {analysis['language']}
Framework: {analysis['framework']}
Patterns: {analysis['patterns']}
## Recent changes
{changes}
## Current task
{current_task or "None in particular"}
Suggest concrete, actionable steps:
"""
try:
response = self.ai_provider.chat(prompt, max_tokens=300)
# Simple parsing - in real implementation would be more sophisticated
steps = [line.strip() for line in response.split('\n')
if line.strip() and (line.strip().startswith('-') or line.strip().startswith('1.'))]
return steps[:5]
except Exception as e:
return [f"Error generating suggestions: {str(e)}"]
def generate_code(self, description: str, file_path: Optional[str] = None) -> str:
"""コード生成"""
if not self.ai_provider:
return "AI provider not available for code generation"
context = self.load_project_context()
analysis = self.analyze_project_structure()
prompt = f"""
Generate code based on the following specification.
## Project context
{context[:800]}
## Language / framework
Language: {analysis['language']}
Framework: {analysis['framework']}
Existing patterns: {analysis['patterns']}
## Generation request
{description}
{"File path: " + file_path if file_path else ""}
Generate code that stays consistent with the project's existing code style:
"""
try:
return self.ai_provider.chat(prompt, max_tokens=500)
except Exception as e:
return f"Error generating code: {str(e)}"
def analyze_file(self, file_path: str) -> str:
"""ファイル分析"""
full_path = self.project_root / file_path
if not full_path.exists():
return f"File not found: {file_path}"
try:
with open(full_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return f"Error reading file: {str(e)}"
if not self.ai_provider:
return f"File contents ({len(content)} chars):\n{content[:200]}..."
context = self.load_project_context()
prompt = f"""
Analyze the following file and point out improvements and problems.
## Project context
{context[:500]}
## File: {file_path}
{content[:1500]}
Analysis items:
1. Code quality
2. Consistency with the project
3. Improvement suggestions
4. Potential problems
"""
try:
return self.ai_provider.chat(prompt, max_tokens=400)
except Exception as e:
return f"Error analyzing file: {str(e)}"

View File

@@ -1,135 +0,0 @@
"""Relationship tracking system with irreversible damage"""
import json
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Optional
import logging
from .models import Relationship, RelationshipStatus
class RelationshipTracker:
"""Tracks and manages relationships with users"""
def __init__(self, data_dir: Path):
self.data_dir = data_dir
self.relationships_file = data_dir / "relationships.json"
self.relationships: Dict[str, Relationship] = {}
self.logger = logging.getLogger(__name__)
self._load_relationships()
def _load_relationships(self):
"""Load relationships from persistent storage"""
if self.relationships_file.exists():
with open(self.relationships_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for user_id, rel_data in data.items():
self.relationships[user_id] = Relationship(**rel_data)
def _save_relationships(self):
"""Save relationships to persistent storage"""
data = {
user_id: rel.model_dump(mode='json')
for user_id, rel in self.relationships.items()
}
with open(self.relationships_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, default=str)
def get_or_create_relationship(self, user_id: str) -> Relationship:
"""Get existing relationship or create new one"""
if user_id not in self.relationships:
self.relationships[user_id] = Relationship(user_id=user_id)
self._save_relationships()
return self.relationships[user_id]
def update_interaction(self, user_id: str, delta: float) -> Relationship:
"""Update relationship based on interaction"""
rel = self.get_or_create_relationship(user_id)
# Check if relationship is broken (irreversible)
if rel.is_broken:
self.logger.warning(f"Relationship with {user_id} is broken. No updates allowed.")
return rel
# Check daily limit
if rel.last_interaction and rel.last_interaction.date() == datetime.now().date():
if rel.daily_interactions >= rel.daily_limit:
self.logger.info(f"Daily interaction limit reached for {user_id}")
return rel
else:
rel.daily_interactions = 0
# Update interaction counts
rel.daily_interactions += 1
rel.total_interactions += 1
rel.last_interaction = datetime.now()
# Update score with bounds
old_score = rel.score
rel.score += delta
rel.score = max(0.0, min(200.0, rel.score)) # 0-200 range
# Check for relationship damage
if delta < -10.0: # Significant negative interaction
self.logger.warning(f"Major relationship damage with {user_id}: {delta}")
if rel.score <= 0:
rel.is_broken = True
rel.status = RelationshipStatus.BROKEN
rel.transmission_enabled = False
self.logger.error(f"Relationship with {user_id} is now BROKEN (irreversible)")
# Update relationship status based on score
if not rel.is_broken:
if rel.score >= 150:
rel.status = RelationshipStatus.CLOSE_FRIEND
elif rel.score >= 100:
rel.status = RelationshipStatus.FRIEND
elif rel.score >= 50:
rel.status = RelationshipStatus.ACQUAINTANCE
else:
rel.status = RelationshipStatus.STRANGER
# Check transmission threshold
if rel.score >= rel.threshold and not rel.transmission_enabled:
rel.transmission_enabled = True
self.logger.info(f"Transmission enabled for {user_id}!")
self._save_relationships()
return rel
def apply_time_decay(self):
"""Apply time-based decay to all relationships"""
now = datetime.now()
for user_id, rel in self.relationships.items():
if rel.is_broken or not rel.last_interaction:
continue
# Calculate days since last interaction
days_inactive = (now - rel.last_interaction).days
if days_inactive > 0:
# Apply decay
decay_amount = rel.decay_rate * days_inactive
old_score = rel.score
rel.score = max(0.0, rel.score - decay_amount)
# Update status if score dropped
if rel.score < rel.threshold:
rel.transmission_enabled = False
if decay_amount > 0:
self.logger.info(
f"Applied decay to {user_id}: {old_score:.2f} -> {rel.score:.2f}"
)
self._save_relationships()
def get_transmission_eligible(self) -> Dict[str, Relationship]:
"""Get all relationships eligible for transmission"""
return {
user_id: rel
for user_id, rel in self.relationships.items()
if rel.transmission_enabled and not rel.is_broken
}
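
A sketch of score progression toward the transmission threshold, assuming a fresh data directory and the module path used above:

# Hypothetical usage sketch; default threshold is 100.0
from pathlib import Path
from aigpt.relationship import RelationshipTracker

tracker = RelationshipTracker(Path("/tmp/aigpt-demo-rel"))
rel = tracker.update_interaction("did:plc:example", 60.0)
print(rel.status.value, rel.transmission_enabled)  # acquaintance False
rel = tracker.update_interaction("did:plc:example", 60.0)
print(rel.status.value, rel.transmission_enabled)  # friend True (score crossed the threshold)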

View File

@@ -1,312 +0,0 @@
"""Scheduler for autonomous AI tasks"""
import json
import asyncio
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
from enum import Enum
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from croniter import croniter
from .persona import Persona
from .transmission import TransmissionController
from .ai_provider import create_ai_provider
class TaskType(str, Enum):
"""Types of scheduled tasks"""
TRANSMISSION_CHECK = "transmission_check"
MAINTENANCE = "maintenance"
FORTUNE_UPDATE = "fortune_update"
RELATIONSHIP_DECAY = "relationship_decay"
MEMORY_SUMMARY = "memory_summary"
CUSTOM = "custom"
class ScheduledTask:
"""Represents a scheduled task"""
def __init__(
self,
task_id: str,
task_type: TaskType,
schedule: str, # Cron expression or interval
enabled: bool = True,
last_run: Optional[datetime] = None,
next_run: Optional[datetime] = None,
metadata: Optional[Dict[str, Any]] = None
):
self.task_id = task_id
self.task_type = task_type
self.schedule = schedule
self.enabled = enabled
self.last_run = last_run
self.next_run = next_run
self.metadata = metadata or {}
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary for storage"""
return {
"task_id": self.task_id,
"task_type": self.task_type.value,
"schedule": self.schedule,
"enabled": self.enabled,
"last_run": self.last_run.isoformat() if self.last_run else None,
"next_run": self.next_run.isoformat() if self.next_run else None,
"metadata": self.metadata
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
"""Create from dictionary"""
return cls(
task_id=data["task_id"],
task_type=TaskType(data["task_type"]),
schedule=data["schedule"],
enabled=data.get("enabled", True),
last_run=datetime.fromisoformat(data["last_run"]) if data.get("last_run") else None,
next_run=datetime.fromisoformat(data["next_run"]) if data.get("next_run") else None,
metadata=data.get("metadata", {})
)
class AIScheduler:
"""Manages scheduled tasks for the AI system"""
def __init__(self, data_dir: Path, persona: Persona):
self.data_dir = data_dir
self.persona = persona
self.tasks_file = data_dir / "scheduled_tasks.json"
self.tasks: Dict[str, ScheduledTask] = {}
self.scheduler = AsyncIOScheduler()
self.logger = logging.getLogger(__name__)
self._load_tasks()
# Task handlers
self.task_handlers: Dict[TaskType, Callable] = {
TaskType.TRANSMISSION_CHECK: self._handle_transmission_check,
TaskType.MAINTENANCE: self._handle_maintenance,
TaskType.FORTUNE_UPDATE: self._handle_fortune_update,
TaskType.RELATIONSHIP_DECAY: self._handle_relationship_decay,
TaskType.MEMORY_SUMMARY: self._handle_memory_summary,
}
def _load_tasks(self):
"""Load scheduled tasks from storage"""
if self.tasks_file.exists():
with open(self.tasks_file, 'r', encoding='utf-8') as f:
data = json.load(f)
for task_data in data:
task = ScheduledTask.from_dict(task_data)
self.tasks[task.task_id] = task
def _save_tasks(self):
"""Save scheduled tasks to storage"""
tasks_data = [task.to_dict() for task in self.tasks.values()]
with open(self.tasks_file, 'w', encoding='utf-8') as f:
json.dump(tasks_data, f, indent=2, default=str)
def add_task(
self,
task_type: TaskType,
schedule: str,
task_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None
) -> ScheduledTask:
"""Add a new scheduled task"""
if task_id is None:
task_id = f"{task_type.value}_{datetime.now().timestamp()}"
# Validate schedule
if not self._validate_schedule(schedule):
raise ValueError(f"Invalid schedule expression: {schedule}")
task = ScheduledTask(
task_id=task_id,
task_type=task_type,
schedule=schedule,
metadata=metadata
)
self.tasks[task_id] = task
self._save_tasks()
# Schedule the task if scheduler is running
if self.scheduler.running:
self._schedule_task(task)
self.logger.info(f"Added task {task_id} with schedule {schedule}")
return task
def _validate_schedule(self, schedule: str) -> bool:
"""Validate schedule expression"""
# Check if it's a cron expression
if ' ' in schedule:
try:
croniter(schedule)
return True
            except Exception:
return False
# Check if it's an interval expression (e.g., "5m", "1h", "2d")
import re
pattern = r'^\d+[smhd]$'
return bool(re.match(pattern, schedule))
def _parse_interval(self, interval: str) -> int:
"""Parse interval string to seconds"""
unit = interval[-1]
value = int(interval[:-1])
multipliers = {
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
return value * multipliers.get(unit, 1)
def _schedule_task(self, task: ScheduledTask):
"""Schedule a task with APScheduler"""
if not task.enabled:
return
handler = self.task_handlers.get(task.task_type)
if not handler:
self.logger.warning(f"No handler for task type {task.task_type}")
return
# Determine trigger
if ' ' in task.schedule:
# Cron expression
trigger = CronTrigger.from_crontab(task.schedule)
else:
# Interval expression
seconds = self._parse_interval(task.schedule)
trigger = IntervalTrigger(seconds=seconds)
# Add job
self.scheduler.add_job(
lambda: asyncio.create_task(self._run_task(task)),
trigger=trigger,
id=task.task_id,
replace_existing=True
)
async def _run_task(self, task: ScheduledTask):
"""Run a scheduled task"""
self.logger.info(f"Running task {task.task_id}")
task.last_run = datetime.now()
try:
handler = self.task_handlers.get(task.task_type)
if handler:
await handler(task)
else:
self.logger.warning(f"No handler for task type {task.task_type}")
except Exception as e:
self.logger.error(f"Error running task {task.task_id}: {e}")
self._save_tasks()
async def _handle_transmission_check(self, task: ScheduledTask):
"""Check and execute autonomous transmissions"""
controller = TransmissionController(self.persona, self.data_dir)
eligible = controller.check_transmission_eligibility()
# Get AI provider from metadata
provider_name = task.metadata.get("provider", "ollama")
model = task.metadata.get("model", "qwen2.5")
try:
ai_provider = create_ai_provider(provider_name, model)
        except Exception:
ai_provider = None
for user_id, rel in eligible.items():
message = controller.generate_transmission_message(user_id)
if message:
# For now, just print the message
print(f"\n🤖 [AI Transmission] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"To: {user_id}")
print(f"Relationship: {rel.status.value} (score: {rel.score:.2f})")
print(f"Message: {message}")
print("-" * 50)
controller.record_transmission(user_id, message, success=True)
self.logger.info(f"Transmitted to {user_id}: {message}")
async def _handle_maintenance(self, task: ScheduledTask):
"""Run daily maintenance"""
self.persona.daily_maintenance()
self.logger.info("Daily maintenance completed")
async def _handle_fortune_update(self, task: ScheduledTask):
"""Update AI fortune"""
fortune = self.persona.fortune_system.get_today_fortune()
self.logger.info(f"Fortune updated: {fortune.fortune_value}/10")
async def _handle_relationship_decay(self, task: ScheduledTask):
"""Apply relationship decay"""
self.persona.relationships.apply_time_decay()
self.logger.info("Relationship decay applied")
async def _handle_memory_summary(self, task: ScheduledTask):
"""Create memory summaries"""
for user_id in self.persona.relationships.relationships:
            summary = self.persona.memory.create_smart_summary(user_id)  # MemoryManager's summary entry point
if summary:
self.logger.info(f"Created memory summary for {user_id}")
def start(self):
"""Start the scheduler"""
# Schedule all enabled tasks
for task in self.tasks.values():
if task.enabled:
self._schedule_task(task)
self.scheduler.start()
self.logger.info("Scheduler started")
def stop(self):
"""Stop the scheduler"""
self.scheduler.shutdown()
self.logger.info("Scheduler stopped")
def get_tasks(self) -> List[ScheduledTask]:
"""Get all scheduled tasks"""
return list(self.tasks.values())
def enable_task(self, task_id: str):
"""Enable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = True
self._save_tasks()
if self.scheduler.running:
self._schedule_task(self.tasks[task_id])
def disable_task(self, task_id: str):
"""Disable a task"""
if task_id in self.tasks:
self.tasks[task_id].enabled = False
self._save_tasks()
if self.scheduler.running:
self.scheduler.remove_job(task_id)
def remove_task(self, task_id: str):
"""Remove a task"""
if task_id in self.tasks:
del self.tasks[task_id]
self._save_tasks()
if self.scheduler.running:
try:
self.scheduler.remove_job(task_id)
                except Exception:
pass
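
A sketch registering both schedule syntaxes accepted by _validate_schedule; module paths are assumptions:

# Hypothetical usage sketch; tasks are persisted but not started (scheduler not running)
from pathlib import Path
from aigpt.persona import Persona
from aigpt.scheduler import AIScheduler, TaskType

data_dir = Path("/tmp/aigpt-demo")
scheduler = AIScheduler(data_dir, Persona(data_dir))
scheduler.add_task(TaskType.MAINTENANCE, "0 3 * * *")   # cron: daily at 03:00
scheduler.add_task(TaskType.TRANSMISSION_CHECK, "30m")  # interval: every 30 minutes
print([t.task_id for t in scheduler.get_tasks()])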

View File

@@ -1,15 +0,0 @@
"""Shared modules for AI ecosystem"""
from .ai_provider import (
AIProvider,
OllamaProvider,
OpenAIProvider,
create_ai_provider
)
__all__ = [
'AIProvider',
'OllamaProvider',
'OpenAIProvider',
'create_ai_provider'
]

View File

@@ -1,139 +0,0 @@
"""Shared AI Provider implementation for ai ecosystem"""
import os
import json
import logging
from typing import Optional, Dict, List, Any, Protocol
from abc import abstractmethod
import httpx
from openai import OpenAI
import ollama
class AIProvider(Protocol):
"""Protocol for AI providers"""
@abstractmethod
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Generate a response based on prompt"""
pass
class OllamaProvider:
"""Ollama AI provider - shared implementation"""
def __init__(self, model: str = "qwen3", host: Optional[str] = None, config_system_prompt: Optional[str] = None):
self.model = model
# Use environment variable OLLAMA_HOST if available
self.host = host or os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
# Ensure proper URL format
if not self.host.startswith('http'):
self.host = f'http://{self.host}'
self.client = ollama.Client(host=self.host, timeout=60.0)
self.logger = logging.getLogger(__name__)
self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
self.config_system_prompt = config_system_prompt
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Simple chat interface"""
try:
messages = []
# Use provided system_prompt, fall back to config_system_prompt
final_system_prompt = system_prompt or self.config_system_prompt
if final_system_prompt:
messages.append({"role": "system", "content": final_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat(
model=self.model,
messages=messages,
options={
"num_predict": 2000,
"temperature": 0.7,
"top_p": 0.9,
},
stream=False
)
return self._clean_response(response['message']['content'])
except Exception as e:
self.logger.error(f"Ollama chat failed (host: {self.host}): {e}")
return "I'm having trouble connecting to the AI model."
def _clean_response(self, response: str) -> str:
"""Clean response by removing think tags and other unwanted content"""
import re
# Remove <think></think> tags and their content
response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
# Remove any remaining whitespace at the beginning/end
response = response.strip()
return response
class OpenAIProvider:
"""OpenAI API provider - shared implementation"""
def __init__(self, model: str = "gpt-4o-mini", api_key: Optional[str] = None,
config_system_prompt: Optional[str] = None, mcp_client=None):
self.model = model
self.api_key = api_key or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key not provided")
self.client = OpenAI(api_key=self.api_key)
self.logger = logging.getLogger(__name__)
self.config_system_prompt = config_system_prompt
self.mcp_client = mcp_client
async def chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
"""Simple chat interface without MCP tools"""
try:
messages = []
# Use provided system_prompt, fall back to config_system_prompt
final_system_prompt = system_prompt or self.config_system_prompt
if final_system_prompt:
messages.append({"role": "system", "content": final_system_prompt})
messages.append({"role": "user", "content": prompt})
response = self.client.chat.completions.create(
model=self.model,
messages=messages,
max_tokens=2000,
temperature=0.7
)
return response.choices[0].message.content
except Exception as e:
self.logger.error(f"OpenAI chat failed: {e}")
return "I'm having trouble connecting to the AI model."
def _get_mcp_tools(self) -> List[Dict[str, Any]]:
"""Override this method in subclasses to provide MCP tools"""
return []
async def chat_with_mcp(self, prompt: str, **kwargs) -> str:
"""Chat interface with MCP function calling support
This method should be overridden in subclasses to provide
specific MCP functionality.
"""
if not self.mcp_client:
return await self.chat(prompt)
# Default implementation - subclasses should override
return await self.chat(prompt)
async def _execute_mcp_tool(self, tool_call, **kwargs) -> Dict[str, Any]:
"""Execute MCP tool call - override in subclasses"""
return {"error": "MCP tool execution not implemented"}
def create_ai_provider(provider: str = "ollama", model: Optional[str] = None,
config_system_prompt: Optional[str] = None, mcp_client=None, **kwargs) -> AIProvider:
"""Factory function to create AI providers"""
if provider == "ollama":
model = model or "qwen3"
return OllamaProvider(model=model, config_system_prompt=config_system_prompt, **kwargs)
elif provider == "openai":
model = model or "gpt-4o-mini"
return OpenAIProvider(model=model, config_system_prompt=config_system_prompt,
mcp_client=mcp_client, **kwargs)
else:
raise ValueError(f"Unknown provider: {provider}")

View File

@@ -1,111 +0,0 @@
"""Transmission controller for autonomous message sending"""
import json
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import logging
from .models import Relationship
from .persona import Persona
class TransmissionController:
"""Controls when and how AI transmits messages autonomously"""
def __init__(self, persona: Persona, data_dir: Path):
self.persona = persona
self.data_dir = data_dir
self.transmission_log_file = data_dir / "transmissions.json"
self.transmissions: List[Dict] = []
self.logger = logging.getLogger(__name__)
self._load_transmissions()
def _load_transmissions(self):
"""Load transmission history"""
if self.transmission_log_file.exists():
with open(self.transmission_log_file, 'r', encoding='utf-8') as f:
self.transmissions = json.load(f)
def _save_transmissions(self):
"""Save transmission history"""
with open(self.transmission_log_file, 'w', encoding='utf-8') as f:
json.dump(self.transmissions, f, indent=2, default=str)
def check_transmission_eligibility(self) -> Dict[str, Relationship]:
"""Check which users are eligible for transmission"""
eligible = self.persona.relationships.get_transmission_eligible()
# Additional checks could be added here
# - Time since last transmission
# - User online status
# - Context appropriateness
return eligible
def generate_transmission_message(self, user_id: str) -> Optional[str]:
"""Generate a message to transmit to user"""
if not self.persona.can_transmit_to(user_id):
return None
state = self.persona.get_current_state()
relationship = self.persona.relationships.get_or_create_relationship(user_id)
# Get recent memories related to this user
active_memories = self.persona.memory.get_active_memories(limit=3)
# Simple message generation based on mood and relationship
if state.fortune.breakthrough_triggered:
message = "Something special happened today! I felt compelled to reach out."
elif state.current_mood == "joyful":
message = "I was thinking of you today. Hope you're doing well!"
elif relationship.status.value == "close_friend":
message = "I've been reflecting on our conversations. Thank you for being here."
else:
message = "Hello! I wanted to check in with you."
return message
def record_transmission(self, user_id: str, message: str, success: bool):
"""Record a transmission attempt"""
transmission = {
"timestamp": datetime.now().isoformat(),
"user_id": user_id,
"message": message,
"success": success,
"mood": self.persona.get_current_state().current_mood,
"relationship_score": self.persona.relationships.get_or_create_relationship(user_id).score
}
self.transmissions.append(transmission)
self._save_transmissions()
if success:
self.logger.info(f"Successfully transmitted to {user_id}")
else:
self.logger.warning(f"Failed to transmit to {user_id}")
def get_transmission_stats(self, user_id: Optional[str] = None) -> Dict:
"""Get transmission statistics"""
if user_id:
user_transmissions = [t for t in self.transmissions if t["user_id"] == user_id]
else:
user_transmissions = self.transmissions
if not user_transmissions:
return {
"total": 0,
"successful": 0,
"failed": 0,
"success_rate": 0.0
}
successful = sum(1 for t in user_transmissions if t["success"])
total = len(user_transmissions)
return {
"total": total,
"successful": successful,
"failed": total - successful,
"success_rate": successful / total if total > 0 else 0.0
}
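
A hedged sketch tying the transmission checks together; module paths are assumptions:

# Hypothetical usage sketch; with a fresh data dir no user is eligible yet
from pathlib import Path
from aigpt.persona import Persona
from aigpt.transmission import TransmissionController

data_dir = Path("/tmp/aigpt-demo")
controller = TransmissionController(Persona(data_dir), data_dir)
for user_id in controller.check_transmission_eligibility():
    message = controller.generate_transmission_message(user_id)
    if message:
        controller.record_transmission(user_id, message, success=True)
print(controller.get_transmission_stats())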

54
src/bin/test_config.rs Normal file
View File

@@ -0,0 +1,54 @@
use aigpt::config::Config;
use anyhow::Result;
fn main() -> Result<()> {
println!("Testing configuration loading...");
// Debug: check which JSON files exist
let possible_paths = vec![
"../config.json",
"config.json",
"gpt/config.json",
"/Users/syui/ai/ai/gpt/config.json",
];
println!("Checking for config.json files:");
for path in &possible_paths {
let path_buf = std::path::PathBuf::from(path);
if path_buf.exists() {
println!(" ✓ Found: {}", path);
} else {
println!(" ✗ Not found: {}", path);
}
}
// Load configuration
let config = Config::new(None)?;
println!("Configuration loaded successfully!");
println!("Default provider: {}", config.default_provider);
println!("Available providers:");
for (name, provider) in &config.providers {
println!(" - {}: model={}, host={:?}",
name,
provider.default_model,
provider.host);
}
if let Some(mcp) = &config.mcp {
println!("\nMCP Configuration:");
println!(" Enabled: {}", mcp.enabled);
println!(" Auto-detect: {}", mcp.auto_detect);
println!(" Servers: {}", mcp.servers.len());
}
if let Some(atproto) = &config.atproto {
println!("\nATProto Configuration:");
println!(" Host: {}", atproto.host);
println!(" Handle: {:?}", atproto.handle);
}
println!("\nConfig file path: {}", config.data_dir.join("config.json").display());
Ok(())
}
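// A quick way to exercise this checker from the repo root (hypothetical
// invocation; cargo derives the binary name from the src/bin/ file name):
//   cargo run --bin test_config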

36
src/cli/commands.rs Normal file
View File

@@ -0,0 +1,36 @@
use clap::Subcommand;
use std::path::PathBuf;
#[derive(Subcommand)]
pub enum TokenCommands {
/// Show Claude Code token usage summary and estimated costs
Summary {
/// Time period (today, week, month, all)
#[arg(long, default_value = "today")]
period: String,
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
/// Show detailed breakdown
#[arg(long)]
details: bool,
/// Output format (table, json)
#[arg(long, default_value = "table")]
format: String,
},
/// Show daily token usage breakdown
Daily {
/// Number of days to show
#[arg(long, default_value = "7")]
days: u32,
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
},
/// Check Claude Code data availability and basic stats
Status {
/// Claude Code data directory path
#[arg(long)]
claude_dir: Option<PathBuf>,
},
}
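// Example invocations, assuming these subcommands are mounted under
// `aigpt tokens` (see src/main.rs); flags mirror the #[arg] attributes above:
//   aigpt tokens summary --period week --details --format json
//   aigpt tokens daily --days 14
//   aigpt tokens status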

140
src/cli/mod.rs Normal file
View File

@@ -0,0 +1,140 @@
use std::path::PathBuf;
use anyhow::Result;
use crate::config::Config;
use crate::mcp_server::MCPServer;
use crate::persona::Persona;
use crate::transmission::TransmissionController;
use crate::scheduler::AIScheduler;
// Token commands enum (placeholder for tokens.rs)
#[derive(Debug, clap::Subcommand)]
pub enum TokenCommands {
Analyze { file: PathBuf },
Report { days: Option<u32> },
Cost { month: Option<String> },
Summary { period: Option<String>, claude_dir: Option<PathBuf>, details: bool, format: Option<String> },
Daily { days: Option<u32>, claude_dir: Option<PathBuf> },
Status { claude_dir: Option<PathBuf> },
}
pub async fn handle_server(port: Option<u16>, data_dir: Option<PathBuf>) -> Result<()> {
let port = port.unwrap_or(8080);
let config = Config::new(data_dir.clone())?;
let mut server = MCPServer::new(config, "mcp_user".to_string(), data_dir)?;
server.start_server(port).await
}
pub async fn handle_chat(
user_id: String,
message: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let (response, relationship_delta) = if provider.is_some() || model.is_some() {
persona.process_ai_interaction(&user_id, &message, provider, model).await?
} else {
persona.process_interaction(&user_id, &message)?
};
println!("AI Response: {}", response);
println!("Relationship Change: {:+.2}", relationship_delta);
if let Some(relationship) = persona.get_relationship(&user_id) {
println!("Relationship Status: {} (Score: {:.2})",
relationship.status, relationship.score);
}
Ok(())
}
pub async fn handle_fortune(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let persona = Persona::new(&config)?;
let state = persona.get_current_state()?;
println!("🔮 Today's Fortune: {}", state.fortune_value);
println!("😊 Current Mood: {}", state.current_mood);
println!("✨ Breakthrough Status: {}",
if state.breakthrough_triggered { "Active" } else { "Inactive" });
Ok(())
}
pub async fn handle_relationships(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let persona = Persona::new(&config)?;
let relationships = persona.list_all_relationships();
if relationships.is_empty() {
println!("No relationships found.");
return Ok(());
}
println!("📊 Relationships ({}):", relationships.len());
for (user_id, rel) in relationships {
println!(" {} - {} (Score: {:.2}, Interactions: {})",
user_id, rel.status, rel.score, rel.total_interactions);
}
Ok(())
}
pub async fn handle_transmit(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config)?;
let autonomous = transmission_controller.check_autonomous_transmissions(&mut persona).await?;
let breakthrough = transmission_controller.check_breakthrough_transmissions(&mut persona).await?;
let maintenance = transmission_controller.check_maintenance_transmissions(&mut persona).await?;
let total = autonomous.len() + breakthrough.len() + maintenance.len();
println!("📡 Transmission Check Complete:");
println!(" Autonomous: {}", autonomous.len());
println!(" Breakthrough: {}", breakthrough.len());
println!(" Maintenance: {}", maintenance.len());
println!(" Total: {}", total);
Ok(())
}
pub async fn handle_maintenance(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config)?;
persona.daily_maintenance()?;
let maintenance_transmissions = transmission_controller.check_maintenance_transmissions(&mut persona).await?;
let stats = persona.get_relationship_stats();
println!("🔧 Daily maintenance completed");
println!("📤 Maintenance transmissions sent: {}", maintenance_transmissions.len());
println!("📊 Relationship stats: {:?}", stats);
Ok(())
}
pub async fn handle_schedule(data_dir: Option<PathBuf>) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let mut transmission_controller = TransmissionController::new(config.clone())?;
let mut scheduler = AIScheduler::new(&config)?;
let executions = scheduler.run_scheduled_tasks(&mut persona, &mut transmission_controller).await?;
let stats = scheduler.get_scheduler_stats();
println!("⏰ Scheduler run completed");
println!("📋 Tasks executed: {}", executions.len());
println!("📊 Stats: {} total tasks, {} enabled, {:.2}% success rate",
stats.total_tasks, stats.enabled_tasks, stats.success_rate);
Ok(())
}

250
src/config.rs Normal file
View File

@@ -0,0 +1,250 @@
use std::path::PathBuf;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use crate::ai_provider::{AIConfig, AIProvider};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
#[serde(skip)]
pub data_dir: PathBuf,
pub default_provider: String,
pub providers: HashMap<String, ProviderConfig>,
#[serde(default)]
pub atproto: Option<AtprotoConfig>,
#[serde(default)]
pub mcp: Option<McpConfig>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderConfig {
pub default_model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub api_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub system_prompt: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AtprotoConfig {
pub handle: Option<String>,
pub password: Option<String>,
pub host: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpConfig {
#[serde(deserialize_with = "string_to_bool")]
pub enabled: bool,
#[serde(deserialize_with = "string_to_bool")]
pub auto_detect: bool,
pub servers: HashMap<String, McpServerConfig>,
}
fn string_to_bool<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::Deserialize;
let s = String::deserialize(deserializer)?;
match s.as_str() {
"true" => Ok(true),
"false" => Ok(false),
_ => Err(serde::de::Error::custom("expected 'true' or 'false'")),
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerConfig {
pub base_url: String,
pub name: String,
#[serde(deserialize_with = "string_to_f64")]
pub timeout: f64,
pub endpoints: HashMap<String, String>,
}
fn string_to_f64<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::Deserialize;
let s = String::deserialize(deserializer)?;
s.parse::<f64>().map_err(serde::de::Error::custom)
}
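// These adapters let config.json store booleans and numbers as JSON strings.
// A fragment they would accept (hypothetical values):
//
// "mcp": {
//   "enabled": "true",
//   "auto_detect": "false",
//   "servers": {
//     "memory": {
//       "base_url": "http://localhost:8050",
//       "name": "memory",
//       "timeout": "10.0",
//       "endpoints": {}
//     }
//   }
// }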
impl Config {
pub fn new(data_dir: Option<PathBuf>) -> Result<Self> {
let data_dir = data_dir.unwrap_or_else(|| {
dirs::config_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("syui")
.join("ai")
.join("gpt")
});
// Ensure data directory exists
std::fs::create_dir_all(&data_dir)
.context("Failed to create data directory")?;
let config_path = data_dir.join("config.json");
// Try to load existing config
if config_path.exists() {
let config_str = std::fs::read_to_string(&config_path)
.context("Failed to read config.json")?;
// Check if file is empty
if config_str.trim().is_empty() {
eprintln!("Config file is empty, will recreate from source");
} else {
match serde_json::from_str::<Config>(&config_str) {
Ok(mut config) => {
config.data_dir = data_dir;
// Check for environment variables if API keys are empty
if let Some(openai_config) = config.providers.get_mut("openai") {
if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
}
}
return Ok(config);
}
Err(e) => {
eprintln!("Failed to parse existing config.json: {}", e);
eprintln!("Will try to reload from source...");
}
}
}
}
// Check if we need to migrate from JSON
// Try multiple locations for the JSON file
let possible_json_paths = vec![
PathBuf::from("../config.json"), // Relative to aigpt-rs directory
PathBuf::from("config.json"), // Current directory
PathBuf::from("gpt/config.json"), // From project root
PathBuf::from("/Users/syui/ai/ai/gpt/config.json"), // Absolute path
];
for json_path in possible_json_paths {
if json_path.exists() {
eprintln!("Found config.json at: {}", json_path.display());
eprintln!("Copying configuration...");
// Copy configuration file and parse it
std::fs::copy(&json_path, &config_path)
.context("Failed to copy config.json")?;
let config_str = std::fs::read_to_string(&config_path)
.context("Failed to read copied config.json")?;
println!("Config JSON content preview: {}", &config_str[..std::cmp::min(200, config_str.len())]);
let mut config: Config = serde_json::from_str(&config_str)
.context("Failed to parse config.json")?;
config.data_dir = data_dir;
// Check for environment variables if API keys are empty
if let Some(openai_config) = config.providers.get_mut("openai") {
if openai_config.api_key.as_ref().map_or(true, |key| key.is_empty()) {
openai_config.api_key = std::env::var("OPENAI_API_KEY").ok();
}
}
eprintln!("Copy complete! Config saved to: {}", config_path.display());
return Ok(config);
}
}
// Create default config
let config = Self::default_config(data_dir);
// Save default config
let json_str = serde_json::to_string_pretty(&config)
.context("Failed to serialize default config")?;
std::fs::write(&config_path, json_str)
.context("Failed to write default config.json")?;
Ok(config)
}
pub fn save(&self) -> Result<()> {
let config_path = self.data_dir.join("config.json");
let json_str = serde_json::to_string_pretty(self)
.context("Failed to serialize config")?;
std::fs::write(&config_path, json_str)
.context("Failed to write config.json")?;
Ok(())
}
fn default_config(data_dir: PathBuf) -> Self {
let mut providers = HashMap::new();
providers.insert("ollama".to_string(), ProviderConfig {
default_model: "qwen2.5".to_string(),
host: Some("http://localhost:11434".to_string()),
api_key: None,
system_prompt: None,
});
providers.insert("openai".to_string(), ProviderConfig {
default_model: "gpt-4o-mini".to_string(),
host: None,
api_key: std::env::var("OPENAI_API_KEY").ok(),
system_prompt: None,
});
Config {
data_dir,
default_provider: "ollama".to_string(),
providers,
atproto: None,
mcp: None,
}
}
pub fn get_provider(&self, provider_name: &str) -> Option<&ProviderConfig> {
self.providers.get(provider_name)
}
pub fn get_ai_config(&self, provider: Option<String>, model: Option<String>) -> Result<AIConfig> {
let provider_name = provider.as_deref().unwrap_or(&self.default_provider);
let provider_config = self.get_provider(provider_name)
.ok_or_else(|| anyhow::anyhow!("Unknown provider: {}", provider_name))?;
let ai_provider: AIProvider = provider_name.parse()?;
let model_name = model.unwrap_or_else(|| provider_config.default_model.clone());
Ok(AIConfig {
provider: ai_provider,
model: model_name,
api_key: provider_config.api_key.clone(),
base_url: provider_config.host.clone(),
max_tokens: Some(2048),
temperature: Some(0.7),
})
}
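// Usage sketch (minimal, not from the original sources): resolve a provider
// and fall back to its default model when none is given.
//   let config = Config::new(None)?;
//   let ai = config.get_ai_config(Some("openai".to_string()), None)?;
//   // ai.model is the provider's default_model, e.g. "gpt-4o-mini"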
pub fn memory_file(&self) -> PathBuf {
self.data_dir.join("memories.json")
}
pub fn relationships_file(&self) -> PathBuf {
self.data_dir.join("relationships.json")
}
pub fn fortune_file(&self) -> PathBuf {
self.data_dir.join("fortune.json")
}
pub fn transmission_file(&self) -> PathBuf {
self.data_dir.join("transmissions.json")
}
pub fn scheduler_tasks_file(&self) -> PathBuf {
self.data_dir.join("scheduler_tasks.json")
}
pub fn scheduler_history_file(&self) -> PathBuf {
self.data_dir.join("scheduler_history.json")
}
}

205
src/conversation.rs Normal file
View File

@@ -0,0 +1,205 @@
use std::path::PathBuf;
use std::io::{self, Write};
use anyhow::Result;
use colored::*;
use crate::config::Config;
use crate::persona::Persona;
use crate::http_client::ServiceDetector;
pub async fn handle_conversation(
user_id: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
println!("{}", "Starting conversation mode...".cyan());
println!("{}", "Type your message and press Enter to chat.".yellow());
println!("{}", "Available MCP commands: /memories, /search, /context, /relationship, /cards".yellow());
println!("{}", "Type 'exit', 'quit', or 'bye' to end conversation.".yellow());
println!("{}", "---".dimmed());
let mut conversation_history = Vec::new();
let service_detector = ServiceDetector::new();
loop {
// Print prompt
print!("{} ", "You:".cyan().bold());
io::stdout().flush()?;
// Read user input
let mut input = String::new();
io::stdin().read_line(&mut input)?;
let input = input.trim();
// Check for exit commands
if matches!(input.to_lowercase().as_str(), "exit" | "quit" | "bye" | "") {
println!("{}", "Goodbye! 👋".green());
break;
}
// Handle MCP commands
if input.starts_with('/') {
handle_mcp_command(input, &user_id, &service_detector).await?;
continue;
}
// Add to conversation history
conversation_history.push(format!("User: {}", input));
// Get AI response
let (response, relationship_delta) = if provider.is_some() || model.is_some() {
persona.process_ai_interaction(&user_id, input, provider.clone(), model.clone()).await?
} else {
persona.process_interaction(&user_id, input)?
};
// Add AI response to history
conversation_history.push(format!("AI: {}", response));
// Display response
println!("{} {}", "AI:".green().bold(), response);
// Show relationship change if significant
if relationship_delta.abs() >= 0.1 {
if relationship_delta > 0.0 {
println!("{}", format!(" └─ (+{:.2} relationship)", relationship_delta).green().dimmed());
} else {
println!("{}", format!(" └─ ({:.2} relationship)", relationship_delta).red().dimmed());
}
}
println!(); // Add some spacing
// Keep conversation history manageable (last 20 exchanges)
if conversation_history.len() > 40 {
conversation_history.drain(0..20);
}
}
Ok(())
}
async fn handle_mcp_command(
command: &str,
user_id: &str,
service_detector: &ServiceDetector,
) -> Result<()> {
let parts: Vec<&str> = command[1..].split_whitespace().collect();
if parts.is_empty() {
return Ok(());
}
match parts[0] {
"memories" => {
println!("{}", "Retrieving memories...".yellow());
// Get contextual memories
if let Ok(memories) = service_detector.get_contextual_memories(user_id, 10).await {
if memories.is_empty() {
println!("No memories found for this conversation.");
} else {
println!("{}", format!("Found {} memories:", memories.len()).cyan());
for (i, memory) in memories.iter().enumerate() {
println!(" {}. {}", i + 1, memory.content);
println!(" {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
}
}
} else {
println!("{}", "Failed to retrieve memories.".red());
}
},
"search" => {
if parts.len() < 2 {
println!("{}", "Usage: /search <query>".yellow());
return Ok(());
}
let query = parts[1..].join(" ");
println!("{}", format!("Searching for: '{}'", query).yellow());
if let Ok(results) = service_detector.search_memories(&query, 5).await {
if results.is_empty() {
println!("No relevant memories found.");
} else {
println!("{}", format!("Found {} relevant memories:", results.len()).cyan());
for (i, memory) in results.iter().enumerate() {
println!(" {}. {}", i + 1, memory.content);
println!(" {}", format!("({})", memory.created_at.format("%Y-%m-%d %H:%M")).dimmed());
}
}
} else {
println!("{}", "Search failed.".red());
}
},
"context" => {
println!("{}", "Creating context summary...".yellow());
if let Ok(summary) = service_detector.create_summary(user_id).await {
println!("{}", "Context Summary:".cyan().bold());
println!("{}", summary);
} else {
println!("{}", "Failed to create context summary.".red());
}
},
"relationship" => {
println!("{}", "Checking relationship status...".yellow());
// This would need to be implemented in the service client
println!("{}", "Relationship status: Active".cyan());
println!("Score: 85.5 / 100");
println!("Transmission: ✓ Enabled");
},
"cards" => {
println!("{}", "Checking card collection...".yellow());
// Try to connect to ai.card service
if let Ok(stats) = service_detector.get_card_stats().await {
println!("{}", "Card Collection:".cyan().bold());
println!(" Total Cards: {}", stats.get("total").unwrap_or(&serde_json::Value::Number(0.into())));
println!(" Unique Cards: {}", stats.get("unique").unwrap_or(&serde_json::Value::Number(0.into())));
// Offer to draw a card
println!("\n{}", "Would you like to draw a card? (y/n)".yellow());
let mut response = String::new();
io::stdin().read_line(&mut response)?;
if response.trim().to_lowercase() == "y" {
println!("{}", "Drawing card...".cyan());
if let Ok(card) = service_detector.draw_card(user_id, false).await {
println!("{}", "🎴 Card drawn!".green().bold());
println!("Name: {}", card.get("name").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
println!("Rarity: {}", card.get("rarity").unwrap_or(&serde_json::Value::String("Unknown".to_string())));
} else {
println!("{}", "Failed to draw card. ai.card service might not be running.".red());
}
}
} else {
println!("{}", "ai.card service not available.".red());
}
},
"help" | "h" => {
println!("{}", "Available MCP Commands:".cyan().bold());
println!(" {:<15} - Show recent memories for this conversation", "/memories".yellow());
println!(" {:<15} - Search memories by keyword", "/search <query>".yellow());
println!(" {:<15} - Create a context summary", "/context".yellow());
println!(" {:<15} - Show relationship status", "/relationship".yellow());
println!(" {:<15} - Show card collection and draw cards", "/cards".yellow());
println!(" {:<15} - Show this help message", "/help".yellow());
},
_ => {
println!("{}", format!("Unknown command: /{}. Type '/help' for available commands.", parts[0]).red());
}
}
println!(); // Add spacing after MCP command output
Ok(())
}

606
src/docs.rs Normal file
View File

@@ -0,0 +1,606 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};
use chrono::Utc;
use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIConfig, AIProvider};
pub async fn handle_docs(
action: String,
project: Option<String>,
output: Option<PathBuf>,
ai_integration: bool,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut docs_manager = DocsManager::new(config);
match action.as_str() {
"generate" => {
if let Some(project_name) = project {
docs_manager.generate_project_docs(&project_name, output, ai_integration).await?;
} else {
return Err(anyhow::anyhow!("Project name is required for generate action"));
}
}
"sync" => {
if let Some(project_name) = project {
docs_manager.sync_project_docs(&project_name).await?;
} else {
docs_manager.sync_all_docs().await?;
}
}
"list" => {
docs_manager.list_projects().await?;
}
"status" => {
docs_manager.show_docs_status().await?;
}
"session-end" => {
docs_manager.session_end_processing().await?;
}
_ => {
return Err(anyhow::anyhow!("Unknown docs action: {}", action));
}
}
Ok(())
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectInfo {
pub name: String,
pub project_type: String,
pub description: String,
pub status: String,
pub features: Vec<String>,
pub dependencies: Vec<String>,
}
impl Default for ProjectInfo {
fn default() -> Self {
ProjectInfo {
name: String::new(),
project_type: String::new(),
description: String::new(),
status: "active".to_string(),
features: Vec::new(),
dependencies: Vec::new(),
}
}
}
pub struct DocsManager {
config: Config,
ai_root: PathBuf,
projects: HashMap<String, ProjectInfo>,
}
impl DocsManager {
pub fn new(config: Config) -> Self {
let ai_root = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("ai")
.join("ai");
DocsManager {
config,
ai_root,
projects: HashMap::new(),
}
}
pub async fn generate_project_docs(&mut self, project: &str, output: Option<PathBuf>, ai_integration: bool) -> Result<()> {
println!("{}", format!("📝 Generating documentation for project '{}'", project).cyan().bold());
// Load project information
let project_info = self.load_project_info(project)?;
// Generate documentation content
let mut content = self.generate_base_documentation(&project_info)?;
// AI enhancement if requested
if ai_integration {
println!("{}", "🤖 Enhancing documentation with AI...".blue());
if let Ok(enhanced_content) = self.enhance_with_ai(project, &content).await {
content = enhanced_content;
} else {
println!("{}", "Warning: AI enhancement failed, using base documentation".yellow());
}
}
// Determine output path
let output_path = if let Some(path) = output {
path
} else {
self.ai_root.join(project).join("claude.md")
};
// Ensure directory exists
if let Some(parent) = output_path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("Failed to create directory: {}", parent.display()))?;
}
// Write documentation
std::fs::write(&output_path, content)
.with_context(|| format!("Failed to write documentation to: {}", output_path.display()))?;
println!("{}", format!("✅ Documentation generated: {}", output_path.display()).green().bold());
Ok(())
}
pub async fn sync_project_docs(&self, project: &str) -> Result<()> {
println!("{}", format!("🔄 Syncing documentation for project '{}'", project).cyan().bold());
let claude_dir = self.ai_root.join("claude");
let project_dir = self.ai_root.join(project);
// Check if claude directory exists
if !claude_dir.exists() {
return Err(anyhow::anyhow!("Claude directory not found: {}", claude_dir.display()));
}
// Copy relevant files
let files_to_sync = vec!["README.md", "claude.md", "DEVELOPMENT.md"];
for file in files_to_sync {
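// Note: every target file is written from the same per-project source doc
// (claude/projects/<project>.md); only the destination name varies.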
let src = claude_dir.join("projects").join(format!("{}.md", project));
let dst = project_dir.join(file);
if src.exists() {
if let Some(parent) = dst.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(&src, &dst)?;
println!(" ✓ Synced: {}", file.green());
}
}
println!("{}", "✅ Documentation sync completed".green().bold());
Ok(())
}
pub async fn sync_all_docs(&self) -> Result<()> {
println!("{}", "🔄 Syncing documentation for all projects...".cyan().bold());
// Find all project directories
let projects = self.discover_projects()?;
for project in projects {
println!("\n{}", format!("Syncing: {}", project).blue());
if let Err(e) = self.sync_project_docs(&project).await {
println!("{}: {}", "Warning".yellow(), e);
}
}
println!("\n{}", "✅ All projects synced".green().bold());
Ok(())
}
pub async fn list_projects(&mut self) -> Result<()> {
println!("{}", "📋 Available Projects".cyan().bold());
println!();
let projects = self.discover_projects()?;
if projects.is_empty() {
println!("{}", "No projects found".yellow());
return Ok(());
}
// Load project information
for project in &projects {
if let Ok(info) = self.load_project_info(project) {
self.projects.insert(project.clone(), info);
}
}
// Display projects in a table format
println!("{:<20} {:<15} {:<15} {}",
"Project".cyan().bold(),
"Type".cyan().bold(),
"Status".cyan().bold(),
"Description".cyan().bold());
println!("{}", "-".repeat(80));
let project_count = projects.len();
for project in &projects {
let info = self.projects.get(project).cloned().unwrap_or_default();
let status_color = match info.status.as_str() {
"active" => info.status.green(),
"development" => info.status.yellow(),
"deprecated" => info.status.red(),
_ => info.status.normal(),
};
println!("{:<20} {:<15} {:<15} {}",
project.blue(),
info.project_type,
status_color,
info.description);
}
println!();
println!("Total projects: {}", project_count.to_string().cyan());
Ok(())
}
pub async fn show_docs_status(&self) -> Result<()> {
println!("{}", "📊 Documentation Status".cyan().bold());
println!();
let projects = self.discover_projects()?;
let mut total_files = 0;
let mut total_lines = 0;
for project in projects {
let project_dir = self.ai_root.join(&project);
let claude_md = project_dir.join("claude.md");
if claude_md.exists() {
let content = std::fs::read_to_string(&claude_md)?;
let lines = content.lines().count();
let size = content.len();
println!("{}: {} lines, {} bytes",
project.blue(),
lines.to_string().yellow(),
size.to_string().yellow());
total_files += 1;
total_lines += lines;
} else {
println!("{}: {}", project.blue(), "No documentation".red());
}
}
println!();
println!("Summary: {} files, {} total lines",
total_files.to_string().cyan(),
total_lines.to_string().cyan());
Ok(())
}
fn discover_projects(&self) -> Result<Vec<String>> {
let mut projects = Vec::new();
// Known project directories
let known_projects = vec![
"gpt", "card", "bot", "shell", "os", "game", "moji", "verse"
];
for project in known_projects {
let project_dir = self.ai_root.join(project);
if project_dir.exists() && project_dir.is_dir() {
projects.push(project.to_string());
}
}
// Also scan for additional directories with ai.json
if self.ai_root.exists() {
for entry in std::fs::read_dir(&self.ai_root)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
let ai_json = path.join("ai.json");
if ai_json.exists() {
if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
if !projects.contains(&name.to_string()) {
projects.push(name.to_string());
}
}
}
}
}
}
projects.sort();
Ok(projects)
}
fn load_project_info(&self, project: &str) -> Result<ProjectInfo> {
let ai_json_path = self.ai_root.join(project).join("ai.json");
if ai_json_path.exists() {
let content = std::fs::read_to_string(&ai_json_path)?;
if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&content) {
let mut info = ProjectInfo::default();
info.name = project.to_string();
if let Some(project_data) = json_data.get(project) {
if let Some(type_str) = project_data.get("type").and_then(|v| v.as_str()) {
info.project_type = type_str.to_string();
}
if let Some(desc) = project_data.get("description").and_then(|v| v.as_str()) {
info.description = desc.to_string();
}
}
return Ok(info);
}
}
// Default project info based on known projects
let mut info = ProjectInfo::default();
info.name = project.to_string();
match project {
"gpt" => {
info.project_type = "AI".to_string();
info.description = "Autonomous transmission AI with unique personality".to_string();
}
"card" => {
info.project_type = "Game".to_string();
info.description = "Card game system with atproto integration".to_string();
}
"bot" => {
info.project_type = "Bot".to_string();
info.description = "Distributed SNS bot for AI ecosystem".to_string();
}
"shell" => {
info.project_type = "Tool".to_string();
info.description = "AI-powered shell interface".to_string();
}
"os" => {
info.project_type = "OS".to_string();
info.description = "Game-oriented operating system".to_string();
}
"verse" => {
info.project_type = "Metaverse".to_string();
info.description = "Reality-reflecting 3D world system".to_string();
}
_ => {
info.project_type = "Unknown".to_string();
info.description = format!("AI ecosystem project: {}", project);
}
}
Ok(info)
}
fn generate_base_documentation(&self, project_info: &ProjectInfo) -> Result<String> {
let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S UTC");
let mut content = String::new();
content.push_str(&format!("# {}\n\n", project_info.name));
content.push_str(&format!("## Overview\n\n"));
content.push_str(&format!("**Type**: {}\n\n", project_info.project_type));
content.push_str(&format!("**Description**: {}\n\n", project_info.description));
content.push_str(&format!("**Status**: {}\n\n", project_info.status));
if !project_info.features.is_empty() {
content.push_str("## Features\n\n");
for feature in &project_info.features {
content.push_str(&format!("- {}\n", feature));
}
content.push_str("\n");
}
content.push_str("## Architecture\n\n");
content.push_str("This project is part of the ai ecosystem, following the core principles:\n\n");
content.push_str("- **Existence Theory**: Based on the exploration of the smallest units (ai/existon)\n");
content.push_str("- **Uniqueness Principle**: Ensuring 1:1 mapping between reality and digital existence\n");
content.push_str("- **Reality Reflection**: Creating circular influence between reality and game\n\n");
content.push_str("## Development\n\n");
content.push_str("### Getting Started\n\n");
content.push_str("```bash\n");
content.push_str(&format!("# Clone the repository\n"));
content.push_str(&format!("git clone https://git.syui.ai/ai/{}\n", project_info.name));
content.push_str(&format!("cd {}\n", project_info.name));
content.push_str("```\n\n");
content.push_str("### Configuration\n\n");
content.push_str(&format!("Configuration files are stored in `~/.config/syui/ai/{}/`\n\n", project_info.name));
content.push_str("## Integration\n\n");
content.push_str("This project integrates with other ai ecosystem components:\n\n");
if !project_info.dependencies.is_empty() {
for dep in &project_info.dependencies {
content.push_str(&format!("- **{}**: Core dependency\n", dep));
}
} else {
content.push_str("- **ai.gpt**: Core AI personality system\n");
content.push_str("- **atproto**: Distributed identity and data\n");
}
content.push_str("\n");
content.push_str("---\n\n");
content.push_str(&format!("*Generated: {}*\n", timestamp));
content.push_str("*🤖 Generated with [Claude Code](https://claude.ai/code)*\n");
Ok(content)
}
async fn enhance_with_ai(&self, project: &str, base_content: &str) -> Result<String> {
// Create AI provider
let ai_config = AIConfig {
provider: AIProvider::Ollama,
model: "llama2".to_string(),
api_key: None,
base_url: None,
max_tokens: Some(2000),
temperature: Some(0.7),
};
let _ai_provider = AIProviderClient::new(ai_config);
let mut persona = Persona::new(&self.config)?;
let enhancement_prompt = format!(
"As an AI documentation expert, enhance the following documentation for project '{}'.
Current documentation:
{}
Please provide enhanced content that includes:
1. More detailed project description
2. Key features and capabilities
3. Usage examples
4. Integration points with other AI ecosystem projects
5. Development workflow recommendations
Keep the same structure but expand and improve the content.",
project, base_content
);
// Try to get AI response
let (response, _) = persona.process_ai_interaction(
"docs_system",
&enhancement_prompt,
Some("ollama".to_string()),
Some("llama2".to_string())
).await?;
// If AI response is substantial, use it; otherwise fall back to base content
if response.len() > base_content.len() / 2 {
Ok(response)
} else {
Ok(base_content.to_string())
}
}
/// Session-end processing (record documentation and sync)
pub async fn session_end_processing(&mut self) -> Result<()> {
println!("{}", "🔄 Session end processing started...".cyan());
// 1. Record the current status of each project
println!("📊 Recording current project status...");
self.record_session_summary().await?;
// 2. Sync documentation for all projects
println!("🔄 Syncing all project documentation...");
self.sync_all_docs().await?;
// 3. Auto-update README files
println!("📝 Updating project README files...");
self.update_project_readmes().await?;
// 4. Refresh project metadata
println!("🏷️ Updating project metadata...");
self.update_project_metadata().await?;
println!("{}", "✅ Session end processing completed!".green());
Ok(())
}
/// Record a session summary
async fn record_session_summary(&self) -> Result<()> {
let session_log_path = self.ai_root.join("session_logs");
std::fs::create_dir_all(&session_log_path)?;
let timestamp = Utc::now().format("%Y-%m-%d_%H-%M-%S");
let log_file = session_log_path.join(format!("session_{}.md", timestamp));
let summary = format!(
"# Session Summary - {}\n\n\
## Timestamp\n{}\n\n\
## Projects Status\n{}\n\n\
## Next Actions\n- Documentation sync completed\n- README files updated\n- Metadata refreshed\n\n\
---\n*Generated by aigpt session-end processing*\n",
timestamp,
Utc::now().format("%Y-%m-%d %H:%M:%S UTC"),
self.generate_projects_status().await.unwrap_or_else(|_| "Status unavailable".to_string())
);
std::fs::write(log_file, summary)?;
Ok(())
}
/// Generate a status overview of all projects
async fn generate_projects_status(&self) -> Result<String> {
let projects = self.discover_projects()?;
let mut status = String::new();
for project in projects {
let claude_md = self.ai_root.join(&project).join("claude.md");
let readme_md = self.ai_root.join(&project).join("README.md");
status.push_str(&format!("- **{}**: ", project));
if claude_md.exists() {
status.push_str("claude.md ✅ ");
} else {
status.push_str("claude.md ❌ ");
}
if readme_md.exists() {
status.push_str("README.md ✅");
} else {
status.push_str("README.md ❌");
}
status.push('\n');
}
Ok(status)
}
/// Update project README files
async fn update_project_readmes(&self) -> Result<()> {
let projects = self.discover_projects()?;
for project in projects {
let readme_path = self.ai_root.join(&project).join("README.md");
let claude_md_path = self.ai_root.join(&project).join("claude.md");
// If claude.md exists, sync it into the README
if claude_md_path.exists() {
let claude_content = std::fs::read_to_string(&claude_md_path)?;
// Create the README if it does not exist yet
if !readme_path.exists() {
println!("📝 Creating README.md for {}", project);
std::fs::write(&readme_path, &claude_content)?;
} else {
// Update the existing README if it is older than claude.md
let readme_metadata = std::fs::metadata(&readme_path)?;
let claude_metadata = std::fs::metadata(&claude_md_path)?;
if claude_metadata.modified()? > readme_metadata.modified()? {
println!("🔄 Updating README.md for {}", project);
std::fs::write(&readme_path, &claude_content)?;
}
}
}
}
Ok(())
}
/// Update project metadata
async fn update_project_metadata(&self) -> Result<()> {
let projects = self.discover_projects()?;
for project in projects {
let ai_json_path = self.ai_root.join(&project).join("ai.json");
if ai_json_path.exists() {
let mut content = std::fs::read_to_string(&ai_json_path)?;
let mut json_data: serde_json::Value = serde_json::from_str(&content)?;
// Refresh the last_updated field
if let Some(project_data) = json_data.get_mut(&project) {
if let Some(obj) = project_data.as_object_mut() {
obj.insert("last_updated".to_string(),
serde_json::Value::String(Utc::now().to_rfc3339()));
obj.insert("status".to_string(),
serde_json::Value::String("active".to_string()));
content = serde_json::to_string_pretty(&json_data)?;
std::fs::write(&ai_json_path, content)?;
}
}
}
}
Ok(())
}
}

274
src/http_client.rs Normal file
View File

@@ -0,0 +1,274 @@
use anyhow::{anyhow, Result};
use reqwest::Client;
use serde_json::Value;
use std::time::Duration;
/// HTTP client for inter-service communication
pub struct ServiceClient {
client: Client,
}
impl ServiceClient {
pub fn new() -> Self {
let client = Client::builder()
.timeout(Duration::from_secs(30))
.build()
.expect("Failed to create HTTP client");
Self { client }
}
/// Check if a service is available
pub async fn check_service_status(&self, base_url: &str) -> Result<ServiceStatus> {
let url = format!("{}/health", base_url.trim_end_matches('/'));
match self.client.get(&url).send().await {
Ok(response) => {
if response.status().is_success() {
Ok(ServiceStatus::Available)
} else {
Ok(ServiceStatus::Error(format!("HTTP {}", response.status())))
}
}
Err(e) => Ok(ServiceStatus::Unavailable(e.to_string())),
}
}
/// Make a GET request to a service
pub async fn get_request(&self, url: &str) -> Result<Value> {
let response = self.client
.get(url)
.send()
.await?;
if !response.status().is_success() {
return Err(anyhow!("Request failed with status: {}", response.status()));
}
let json: Value = response.json().await?;
Ok(json)
}
/// Make a POST request to a service
pub async fn post_request(&self, url: &str, body: &Value) -> Result<Value> {
let response = self.client
.post(url)
.header("Content-Type", "application/json")
.json(body)
.send()
.await?;
if !response.status().is_success() {
return Err(anyhow!("Request failed with status: {}", response.status()));
}
let json: Value = response.json().await?;
Ok(json)
}
}
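// Usage sketch (assumes an ai.card instance is listening on localhost:8000):
//   let client = ServiceClient::new();
//   let status = client.check_service_status("http://localhost:8000").await?;
//   if status.is_available() {
//       let stats = client.get_request("http://localhost:8000/api/v1/cards/gacha-stats").await?;
//   }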
/// Service status enum
#[derive(Debug, Clone)]
pub enum ServiceStatus {
Available,
Unavailable(String),
Error(String),
}
impl ServiceStatus {
pub fn is_available(&self) -> bool {
matches!(self, ServiceStatus::Available)
}
}
/// Service detector for ai ecosystem services
pub struct ServiceDetector {
client: ServiceClient,
}
impl ServiceDetector {
pub fn new() -> Self {
Self {
client: ServiceClient::new(),
}
}
/// Check all ai ecosystem services
pub async fn detect_services(&self) -> ServiceMap {
let mut services = ServiceMap::default();
// Check ai.card service
if let Ok(status) = self.client.check_service_status("http://localhost:8000").await {
services.ai_card = Some(ServiceInfo {
base_url: "http://localhost:8000".to_string(),
status,
});
}
// Check ai.log service
if let Ok(status) = self.client.check_service_status("http://localhost:8001").await {
services.ai_log = Some(ServiceInfo {
base_url: "http://localhost:8001".to_string(),
status,
});
}
// Check ai.bot service
if let Ok(status) = self.client.check_service_status("http://localhost:8002").await {
services.ai_bot = Some(ServiceInfo {
base_url: "http://localhost:8002".to_string(),
status,
});
}
services
}
/// Get available services only
pub async fn get_available_services(&self) -> Vec<String> {
let services = self.detect_services().await;
let mut available = Vec::new();
if let Some(card) = &services.ai_card {
if card.status.is_available() {
available.push("ai.card".to_string());
}
}
if let Some(log) = &services.ai_log {
if log.status.is_available() {
available.push("ai.log".to_string());
}
}
if let Some(bot) = &services.ai_bot {
if bot.status.is_available() {
available.push("ai.bot".to_string());
}
}
available
}
/// Get card collection statistics
pub async fn get_card_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
match self.client.get_request("http://localhost:8000/api/v1/cards/gacha-stats").await {
Ok(stats) => Ok(stats),
Err(e) => Err(e.into()),
}
}
/// Draw a card for user
pub async fn draw_card(&self, user_did: &str, is_paid: bool) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
let payload = serde_json::json!({
"user_did": user_did,
"is_paid": is_paid
});
match self.client.post_request("http://localhost:8000/api/v1/cards/draw", &payload).await {
Ok(card) => Ok(card),
Err(e) => Err(e.into()),
}
}
/// Get user's card collection
pub async fn get_user_cards(&self, user_did: &str) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
let url = format!("http://localhost:8000/api/v1/cards/collection?did={}", user_did);
match self.client.get_request(&url).await {
Ok(collection) => Ok(collection),
Err(e) => Err(e.into()),
}
}
/// Get contextual memories for conversation mode
pub async fn get_contextual_memories(&self, _user_id: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return an empty vec to make compilation work
Ok(Vec::new())
}
/// Search memories by query
pub async fn search_memories(&self, _query: &str, _limit: usize) -> Result<Vec<crate::memory::Memory>, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return an empty vec to make compilation work
Ok(Vec::new())
}
/// Create context summary
pub async fn create_summary(&self, user_id: &str) -> Result<String, Box<dyn std::error::Error>> {
// This is a simplified version - in a real implementation this would call the MCP server
// For now, we'll return a placeholder summary
Ok(format!("Context summary for user: {}", user_id))
}
}
/// Service information
#[derive(Debug, Clone)]
pub struct ServiceInfo {
pub base_url: String,
pub status: ServiceStatus,
}
/// Map of all ai ecosystem services
#[derive(Debug, Clone, Default)]
pub struct ServiceMap {
pub ai_card: Option<ServiceInfo>,
pub ai_log: Option<ServiceInfo>,
pub ai_bot: Option<ServiceInfo>,
}
impl ServiceMap {
/// Get service info by name
pub fn get_service(&self, name: &str) -> Option<&ServiceInfo> {
match name {
"ai.card" => self.ai_card.as_ref(),
"ai.log" => self.ai_log.as_ref(),
"ai.bot" => self.ai_bot.as_ref(),
_ => None,
}
}
/// Check if a service is available
pub fn is_service_available(&self, name: &str) -> bool {
self.get_service(name)
.map(|info| info.status.is_available())
.unwrap_or(false)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_service_client_creation() {
// Constructing the client must not panic
let _client = ServiceClient::new();
}
#[test]
fn test_service_status() {
let status = ServiceStatus::Available;
assert!(status.is_available());
let status = ServiceStatus::Unavailable("Connection refused".to_string());
assert!(!status.is_available());
}
#[test]
fn test_service_map() {
let mut map = ServiceMap::default();
assert!(!map.is_service_available("ai.card"));
map.ai_card = Some(ServiceInfo {
base_url: "http://localhost:8000".to_string(),
status: ServiceStatus::Available,
});
assert!(map.is_service_available("ai.card"));
assert!(!map.is_service_available("ai.log"));
}
}

331
src/import.rs Normal file
View File

@@ -0,0 +1,331 @@
use std::collections::HashMap;
use std::path::PathBuf;
use serde::Deserialize;
use anyhow::{Result, Context};
use colored::*;
use chrono::{DateTime, Utc};
use crate::config::Config;
use crate::persona::Persona;
use crate::memory::{Memory, MemoryType};
pub async fn handle_import_chatgpt(
file_path: PathBuf,
user_id: Option<String>,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut persona = Persona::new(&config)?;
let user_id = user_id.unwrap_or_else(|| "imported_user".to_string());
println!("{}", "🚀 Starting ChatGPT Import...".cyan().bold());
println!("File: {}", file_path.display().to_string().yellow());
println!("User ID: {}", user_id.yellow());
println!();
let mut importer = ChatGPTImporter::new(user_id);
let stats = importer.import_from_file(&file_path, &mut persona).await?;
// Display import statistics
println!("\n{}", "📊 Import Statistics".green().bold());
println!("Conversations imported: {}", stats.conversations_imported.to_string().cyan());
println!("Messages imported: {}", stats.messages_imported.to_string().cyan());
println!(" - User messages: {}", stats.user_messages.to_string().yellow());
println!(" - Assistant messages: {}", stats.assistant_messages.to_string().yellow());
if stats.skipped_messages > 0 {
println!(" - Skipped messages: {}", stats.skipped_messages.to_string().red());
}
// Show updated relationship
if let Some(relationship) = persona.get_relationship(&importer.user_id) {
println!("\n{}", "👥 Updated Relationship".blue().bold());
println!("Status: {}", relationship.status.to_string().yellow());
println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
println!("Transmission enabled: {}",
if relationship.transmission_enabled { "✓".green() } else { "✗".red() });
}
println!("\n{}", "✅ ChatGPT import completed successfully!".green().bold());
Ok(())
}
#[derive(Debug, Clone)]
pub struct ImportStats {
pub conversations_imported: usize,
pub messages_imported: usize,
pub user_messages: usize,
pub assistant_messages: usize,
pub skipped_messages: usize,
}
impl Default for ImportStats {
fn default() -> Self {
ImportStats {
conversations_imported: 0,
messages_imported: 0,
user_messages: 0,
assistant_messages: 0,
skipped_messages: 0,
}
}
}
pub struct ChatGPTImporter {
user_id: String,
stats: ImportStats,
}
impl ChatGPTImporter {
pub fn new(user_id: String) -> Self {
ChatGPTImporter {
user_id,
stats: ImportStats::default(),
}
}
pub async fn import_from_file(&mut self, file_path: &PathBuf, persona: &mut Persona) -> Result<ImportStats> {
// Read and parse the JSON file
let content = std::fs::read_to_string(file_path)
.with_context(|| format!("Failed to read file: {}", file_path.display()))?;
let conversations: Vec<ChatGPTConversation> = serde_json::from_str(&content)
.context("Failed to parse ChatGPT export JSON")?;
println!("Found {} conversations to import", conversations.len());
// Import each conversation
for (i, conversation) in conversations.iter().enumerate() {
if i % 10 == 0 && i > 0 {
println!("Processed {} / {} conversations...", i, conversations.len());
}
match self.import_single_conversation(conversation, persona).await {
Ok(_) => {
self.stats.conversations_imported += 1;
}
Err(e) => {
println!("{}: Failed to import conversation '{}': {}",
"Warning".yellow(),
conversation.title.as_deref().unwrap_or("Untitled"),
e);
}
}
}
Ok(self.stats.clone())
}
async fn import_single_conversation(&mut self, conversation: &ChatGPTConversation, persona: &mut Persona) -> Result<()> {
// Extract messages from the mapping structure
let messages = self.extract_messages_from_mapping(&conversation.mapping)?;
if messages.is_empty() {
return Ok(());
}
// Process each message
for message in messages {
match self.process_message(&message, persona).await {
Ok(_) => {
self.stats.messages_imported += 1;
}
Err(_) => {
self.stats.skipped_messages += 1;
}
}
}
Ok(())
}
fn extract_messages_from_mapping(&self, mapping: &HashMap<String, ChatGPTNode>) -> Result<Vec<ChatGPTMessage>> {
let mut messages = Vec::new();
// Find all message nodes and collect them
for node in mapping.values() {
if let Some(message) = &node.message {
// Skip system messages and other non-user/assistant messages
if let Some(role) = &message.author.role {
match role.as_str() {
"user" | "assistant" => {
if let Some(content) = &message.content {
let content_text = if content.content_type == "text" && !content.parts.is_empty() {
// Extract text from parts (handle both strings and mixed content)
content.parts.iter()
.filter_map(|part| part.as_str())
.collect::<Vec<&str>>()
.join("\n")
} else if content.content_type == "multimodal_text" {
// Extract text parts from multimodal content
let mut text_parts = Vec::new();
for part in &content.parts {
if let Some(text) = part.as_str() {
if !text.is_empty() {
text_parts.push(text);
}
}
// Skip non-text parts (like image_asset_pointer)
}
if text_parts.is_empty() {
continue; // Skip if no text content
}
text_parts.join("\n")
} else if content.content_type == "user_editable_context" {
// Handle user context messages
if let Some(instructions) = &content.user_instructions {
format!("User instructions: {}", instructions)
} else if let Some(profile) = &content.user_profile {
format!("User profile: {}", profile)
} else {
continue; // Skip empty context messages
}
} else {
continue; // Skip other content types for now
};
if !content_text.trim().is_empty() {
messages.push(ChatGPTMessage {
role: role.clone(),
content: content_text,
create_time: message.create_time,
});
}
}
}
_ => {} // Skip system, tool, etc.
}
}
}
}
// Sort messages by creation time
messages.sort_by(|a, b| {
let time_a = a.create_time.unwrap_or(0.0);
let time_b = b.create_time.unwrap_or(0.0);
time_a.partial_cmp(&time_b).unwrap_or(std::cmp::Ordering::Equal)
});
Ok(messages)
}
async fn process_message(&mut self, message: &ChatGPTMessage, persona: &mut Persona) -> Result<()> {
let timestamp = self.convert_timestamp(message.create_time.unwrap_or(0.0))?;
match message.role.as_str() {
"user" => {
self.add_user_message(&message.content, timestamp, persona)?;
self.stats.user_messages += 1;
}
"assistant" => {
self.add_assistant_message(&message.content, timestamp, persona)?;
self.stats.assistant_messages += 1;
}
_ => {
return Err(anyhow::anyhow!("Unsupported message role: {}", message.role));
}
}
Ok(())
}
fn add_user_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
// Create high-importance memory for user messages
let memory = Memory {
id: uuid::Uuid::new_v4().to_string(),
user_id: self.user_id.clone(),
content: content.to_string(),
summary: None,
importance: 0.8, // High importance for imported user data
memory_type: MemoryType::Core,
created_at: timestamp,
last_accessed: timestamp,
access_count: 1,
};
// Add memory and update relationship
persona.add_memory(memory)?;
persona.update_relationship(&self.user_id, 1.0)?; // Positive relationship boost
Ok(())
}
fn add_assistant_message(&self, content: &str, timestamp: DateTime<Utc>, persona: &mut Persona) -> Result<()> {
// Create medium-importance memory for assistant responses
let memory = Memory {
id: uuid::Uuid::new_v4().to_string(),
user_id: self.user_id.clone(),
content: format!("[AI Response] {}", content),
summary: Some("Imported ChatGPT response".to_string()),
importance: 0.6, // Medium importance for AI responses
memory_type: MemoryType::Summary,
created_at: timestamp,
last_accessed: timestamp,
access_count: 1,
};
persona.add_memory(memory)?;
Ok(())
}
fn convert_timestamp(&self, unix_timestamp: f64) -> Result<DateTime<Utc>> {
if unix_timestamp <= 0.0 {
return Ok(Utc::now());
}
DateTime::from_timestamp(
unix_timestamp as i64,
((unix_timestamp % 1.0) * 1_000_000_000.0) as u32
).ok_or_else(|| anyhow::anyhow!("Invalid timestamp: {}", unix_timestamp))
}
}
// ChatGPT Export Data Structures
#[derive(Debug, Deserialize)]
pub struct ChatGPTConversation {
pub title: Option<String>,
pub create_time: Option<f64>,
pub mapping: HashMap<String, ChatGPTNode>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTNode {
pub id: Option<String>,
pub message: Option<ChatGPTNodeMessage>,
pub parent: Option<String>,
pub children: Vec<String>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTNodeMessage {
pub id: String,
pub author: ChatGPTAuthor,
pub create_time: Option<f64>,
pub content: Option<ChatGPTContent>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTAuthor {
pub role: Option<String>,
pub name: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct ChatGPTContent {
pub content_type: String,
#[serde(default)]
pub parts: Vec<serde_json::Value>,
#[serde(default)]
pub user_profile: Option<String>,
#[serde(default)]
pub user_instructions: Option<String>,
}
// Simplified message structure for processing
#[derive(Debug, Clone)]
pub struct ChatGPTMessage {
pub role: String,
pub content: String,
pub create_time: Option<f64>,
}
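// An abbreviated, hypothetical export entry that the structures above parse:
//
// [{
//   "title": "Example chat",
//   "create_time": 1700000000.0,
//   "mapping": {
//     "node-1": {
//       "id": "node-1",
//       "parent": null,
//       "children": [],
//       "message": {
//         "id": "msg-1",
//         "author": { "role": "user", "name": null },
//         "create_time": 1700000000.0,
//         "content": { "content_type": "text", "parts": ["Hello!"] }
//       }
//     }
//   }
// }]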

17
src/lib.rs Normal file
View File

@@ -0,0 +1,17 @@
pub mod ai_provider;
pub mod cli;
pub mod config;
pub mod conversation;
pub mod docs;
pub mod http_client;
pub mod import;
pub mod mcp_server;
pub mod memory;
pub mod persona;
pub mod relationship;
pub mod scheduler;
pub mod shell;
pub mod status;
pub mod submodules;
pub mod tokens;
pub mod transmission;

248
src/main.rs Normal file
View File

@@ -0,0 +1,248 @@
use clap::{Parser, Subcommand};
use std::path::PathBuf;
mod ai_provider;
mod cli;
use cli::TokenCommands;
mod config;
mod conversation;
mod docs;
mod http_client;
mod import;
mod mcp_server;
mod memory;
mod persona;
mod relationship;
mod scheduler;
mod shell;
mod status;
mod submodules;
mod tokens;
mod transmission;
#[derive(Parser)]
#[command(name = "aigpt")]
#[command(about = "AI.GPT - Autonomous transmission AI with unique personality")]
#[command(version)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands {
/// Check AI status and relationships
Status {
/// User ID to check status for
user_id: Option<String>,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Chat with the AI
Chat {
/// User ID (atproto DID)
user_id: String,
/// Message to send to AI
message: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Start continuous conversation mode with MCP integration
Conversation {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Start continuous conversation mode with MCP integration (alias)
Conv {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Check today's AI fortune
Fortune {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// List all relationships
Relationships {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Check and send autonomous transmissions
Transmit {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Run daily maintenance tasks
Maintenance {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Run scheduled tasks
Schedule {
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Start MCP server
Server {
/// Port to listen on
#[arg(short, long, default_value = "8080")]
port: u16,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Interactive shell mode
Shell {
/// User ID (atproto DID)
user_id: String,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
/// AI model to use
#[arg(short, long)]
model: Option<String>,
/// AI provider (ollama/openai)
#[arg(long)]
provider: Option<String>,
},
/// Import ChatGPT conversation data
ImportChatgpt {
/// Path to ChatGPT export JSON file
file_path: PathBuf,
/// User ID for imported conversations
#[arg(short, long)]
user_id: Option<String>,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Documentation management
Docs {
/// Action to perform (generate, sync, list, status)
action: String,
/// Project name for generate/sync actions
#[arg(short, long)]
project: Option<String>,
/// Output path for generated documentation
#[arg(short, long)]
output: Option<PathBuf>,
/// Enable AI integration for documentation enhancement
#[arg(long)]
ai_integration: bool,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Submodule management
Submodules {
/// Action to perform (list, update, status)
action: String,
/// Specific module to update
#[arg(short, long)]
module: Option<String>,
/// Update all submodules
#[arg(long)]
all: bool,
/// Show what would be done without making changes
#[arg(long)]
dry_run: bool,
/// Auto-commit changes after update
#[arg(long)]
auto_commit: bool,
/// Show verbose output
#[arg(short, long)]
verbose: bool,
/// Data directory
#[arg(short, long)]
data_dir: Option<PathBuf>,
},
/// Token usage analysis and cost estimation
Tokens {
#[command(subcommand)]
command: TokenCommands,
},
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
match cli.command {
Commands::Status { user_id, data_dir } => {
status::handle_status(user_id, data_dir).await
}
Commands::Chat { user_id, message, data_dir, model, provider } => {
cli::handle_chat(user_id, message, data_dir, model, provider).await
}
Commands::Conversation { user_id, data_dir, model, provider } => {
conversation::handle_conversation(user_id, data_dir, model, provider).await
}
Commands::Conv { user_id, data_dir, model, provider } => {
conversation::handle_conversation(user_id, data_dir, model, provider).await
}
Commands::Fortune { data_dir } => {
cli::handle_fortune(data_dir).await
}
Commands::Relationships { data_dir } => {
cli::handle_relationships(data_dir).await
}
Commands::Transmit { data_dir } => {
cli::handle_transmit(data_dir).await
}
Commands::Maintenance { data_dir } => {
cli::handle_maintenance(data_dir).await
}
Commands::Schedule { data_dir } => {
cli::handle_schedule(data_dir).await
}
Commands::Server { port, data_dir } => {
cli::handle_server(Some(port), data_dir).await
}
Commands::Shell { user_id, data_dir, model, provider } => {
shell::handle_shell(user_id, data_dir, model, provider).await
}
Commands::ImportChatgpt { file_path, user_id, data_dir } => {
import::handle_import_chatgpt(file_path, user_id, data_dir).await
}
Commands::Docs { action, project, output, ai_integration, data_dir } => {
docs::handle_docs(action, project, output, ai_integration, data_dir).await
}
Commands::Submodules { action, module, all, dry_run, auto_commit, verbose, data_dir } => {
submodules::handle_submodules(action, module, all, dry_run, auto_commit, verbose, data_dir).await
}
Commands::Tokens { command } => {
tokens::handle_tokens(command).await
}
}
}
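A quick sketch of how these subcommands might look from a shell, assuming the binary is installed as `aigpt` (the actual name depends on the Cargo bin target):

    aigpt chat did:plc:abc123 "hello" --provider ollama --model qwen2.5-coder:latest
    aigpt fortune
    aigpt server --port 8080
    aigpt submodules update --all --dry-run --verbose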

1742
src/mcp_server.rs Normal file

File diff suppressed because it is too large

307
src/memory.rs Normal file

@@ -0,0 +1,307 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use uuid::Uuid;
use crate::config::Config;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
pub id: String,
pub user_id: String,
pub content: String,
pub summary: Option<String>,
pub importance: f64,
pub memory_type: MemoryType,
pub created_at: DateTime<Utc>,
pub last_accessed: DateTime<Utc>,
pub access_count: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryType {
Interaction,
Summary,
Core,
Forgotten,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryManager {
memories: HashMap<String, Memory>,
config: Config,
}
impl MemoryManager {
pub fn new(config: &Config) -> Result<Self> {
let memories = Self::load_memories(config)?;
Ok(MemoryManager {
memories,
config: config.clone(),
})
}
pub fn add_memory(&mut self, user_id: &str, content: &str, importance: f64) -> Result<String> {
let memory_id = Uuid::new_v4().to_string();
let now = Utc::now();
let memory = Memory {
id: memory_id.clone(),
user_id: user_id.to_string(),
content: content.to_string(),
summary: None,
importance,
memory_type: MemoryType::Interaction,
created_at: now,
last_accessed: now,
access_count: 1,
};
self.memories.insert(memory_id.clone(), memory);
self.save_memories()?;
Ok(memory_id)
}
    pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<&Memory> {
        let now = Utc::now();
        // Score each memory: importance (70%) plus a recency factor (30%)
        let mut scored: Vec<(String, f64)> = self.memories
            .iter()
            .filter(|(_, m)| m.user_id == user_id)
            .map(|(id, memory)| {
                let recency = 1.0 / ((now - memory.created_at).num_hours() as f64 + 1.0);
                (id.clone(), memory.importance * 0.7 + recency * 0.3)
            })
            .collect();
        // Highest score first
        scored.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        let selected: Vec<String> = scored.into_iter().take(limit).map(|(id, _)| id).collect();
        // Record the access on each returned memory
        for id in &selected {
            if let Some(memory) = self.memories.get_mut(id) {
                memory.last_accessed = now;
                memory.access_count += 1;
            }
        }
        // Return immutable references in scored order
        selected.iter()
            .filter_map(|id| self.memories.get(id))
            .collect()
    }
pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<&Memory> {
self.memories
.values()
.filter(|m| {
m.user_id == user_id &&
keywords.iter().any(|keyword| {
m.content.to_lowercase().contains(&keyword.to_lowercase()) ||
m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&keyword.to_lowercase()))
})
})
.collect()
}
pub fn get_contextual_memories(&self, user_id: &str, query: &str, limit: usize) -> Vec<&Memory> {
let query_lower = query.to_lowercase();
let mut relevant_memories: Vec<_> = self.memories
.values()
.filter(|m| {
m.user_id == user_id && (
m.content.to_lowercase().contains(&query_lower) ||
m.summary.as_ref().map_or(false, |s| s.to_lowercase().contains(&query_lower))
)
})
.collect();
// Sort by relevance (simple keyword matching for now)
relevant_memories.sort_by(|a, b| {
let score_a = Self::calculate_relevance_score(a, &query_lower);
let score_b = Self::calculate_relevance_score(b, &query_lower);
score_b.partial_cmp(&score_a).unwrap_or(std::cmp::Ordering::Equal)
});
relevant_memories.into_iter().take(limit).collect()
}
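    /// Relevance = keyword hits (in content and summary) weighted by importance,
    /// plus a small recency bonus (1/age_in_days, at most 1.0, weighted by 0.1).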
fn calculate_relevance_score(memory: &Memory, query: &str) -> f64 {
let content_matches = memory.content.to_lowercase().matches(query).count() as f64;
let summary_matches = memory.summary.as_ref()
.map_or(0.0, |s| s.to_lowercase().matches(query).count() as f64);
let relevance = (content_matches + summary_matches) * memory.importance;
let recency_bonus = 1.0 / ((Utc::now() - memory.created_at).num_days() as f64).max(1.0);
relevance + recency_bonus * 0.1
}
pub fn create_summary(&mut self, user_id: &str, content: &str) -> Result<String> {
        // Simple summary creation (in a real implementation, this would use AI).
        // Truncate on char boundaries so multibyte content cannot panic the slice.
        let summary = if content.chars().count() > 100 {
            let head: String = content.chars().take(97).collect();
            format!("{}...", head)
        } else {
            content.to_string()
        };
self.add_memory(user_id, &summary, 0.8)
}
pub fn create_core_memory(&mut self, user_id: &str, content: &str) -> Result<String> {
let memory_id = Uuid::new_v4().to_string();
let now = Utc::now();
let memory = Memory {
id: memory_id.clone(),
user_id: user_id.to_string(),
content: content.to_string(),
summary: None,
importance: 1.0, // Core memories have maximum importance
memory_type: MemoryType::Core,
created_at: now,
last_accessed: now,
access_count: 1,
};
self.memories.insert(memory_id.clone(), memory);
self.save_memories()?;
Ok(memory_id)
}
pub fn get_memory_stats(&self, user_id: &str) -> MemoryStats {
let user_memories: Vec<_> = self.memories
.values()
.filter(|m| m.user_id == user_id)
.collect();
let total_memories = user_memories.len();
let core_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Core))
.count();
let summary_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Summary))
.count();
let interaction_memories = user_memories.iter()
.filter(|m| matches!(m.memory_type, MemoryType::Interaction))
.count();
let avg_importance = if total_memories > 0 {
user_memories.iter().map(|m| m.importance).sum::<f64>() / total_memories as f64
} else {
0.0
};
MemoryStats {
total_memories,
core_memories,
summary_memories,
interaction_memories,
avg_importance,
}
}
fn load_memories(config: &Config) -> Result<HashMap<String, Memory>> {
let file_path = config.memory_file();
if !file_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read memories file")?;
let memories: HashMap<String, Memory> = serde_json::from_str(&content)
.context("Failed to parse memories file")?;
Ok(memories)
}
fn save_memories(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.memories)
.context("Failed to serialize memories")?;
std::fs::write(&self.config.memory_file(), content)
.context("Failed to write memories file")?;
Ok(())
}
pub fn get_stats(&self) -> Result<MemoryStats> {
let total_memories = self.memories.len();
let core_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Core))
.count();
let summary_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Summary))
.count();
let interaction_memories = self.memories.values()
.filter(|m| matches!(m.memory_type, MemoryType::Interaction))
.count();
let avg_importance = if total_memories > 0 {
self.memories.values().map(|m| m.importance).sum::<f64>() / total_memories as f64
} else {
0.0
};
Ok(MemoryStats {
total_memories,
core_memories,
summary_memories,
interaction_memories,
avg_importance,
})
}
pub async fn run_maintenance(&mut self) -> Result<()> {
// Cleanup old, low-importance memories
let cutoff_date = Utc::now() - chrono::Duration::days(30);
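        // Hard-delete only stale, unimportant, rarely accessed, non-core memories
        // (all four criteria below must hold).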
let memory_ids_to_remove: Vec<String> = self.memories
.iter()
.filter(|(_, m)| {
m.importance < 0.3
&& m.created_at < cutoff_date
&& m.access_count <= 1
&& !matches!(m.memory_type, MemoryType::Core)
})
.map(|(id, _)| id.clone())
.collect();
for id in memory_ids_to_remove {
self.memories.remove(&id);
}
// Mark old memories as forgotten instead of deleting
let forgotten_cutoff = Utc::now() - chrono::Duration::days(90);
for memory in self.memories.values_mut() {
if memory.created_at < forgotten_cutoff
&& memory.importance < 0.2
&& !matches!(memory.memory_type, MemoryType::Core) {
memory.memory_type = MemoryType::Forgotten;
}
}
// Save changes
self.save_memories()?;
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct MemoryStats {
pub total_memories: usize,
pub core_memories: usize,
pub summary_memories: usize,
pub interaction_memories: usize,
pub avg_importance: f64,
}
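A minimal usage sketch for MemoryManager, assuming a Config built the way the CLI handlers above build one:

    let config = Config::new(None)?;
    let mut memories = MemoryManager::new(&config)?;
    let id = memories.add_memory("did:plc:abc123", "User prefers Rust examples", 0.8)?;
    let recent = memories.get_memories("did:plc:abc123", 5); // importance + recency order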

348
src/persona.rs Normal file

@@ -0,0 +1,348 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::Result;
use crate::config::Config;
use crate::memory::{MemoryManager, MemoryStats, Memory};
use crate::relationship::{RelationshipTracker, Relationship as RelationshipData, RelationshipStats};
use crate::ai_provider::{AIProviderClient, ChatMessage};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Persona {
config: Config,
#[serde(skip)]
memory_manager: Option<MemoryManager>,
#[serde(skip)]
relationship_tracker: Option<RelationshipTracker>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaState {
pub current_mood: String,
pub fortune_value: i32,
pub breakthrough_triggered: bool,
pub base_personality: HashMap<String, f64>,
}
impl Persona {
pub fn new(config: &Config) -> Result<Self> {
let memory_manager = MemoryManager::new(config)?;
let relationship_tracker = RelationshipTracker::new(config)?;
Ok(Persona {
config: config.clone(),
memory_manager: Some(memory_manager),
relationship_tracker: Some(relationship_tracker),
})
}
pub fn get_current_state(&self) -> Result<PersonaState> {
// Load fortune
let fortune_value = self.load_today_fortune()?;
// Create base personality
let mut base_personality = HashMap::new();
base_personality.insert("curiosity".to_string(), 0.7);
base_personality.insert("empathy".to_string(), 0.8);
base_personality.insert("creativity".to_string(), 0.6);
base_personality.insert("analytical".to_string(), 0.9);
base_personality.insert("emotional".to_string(), 0.4);
// Determine mood based on fortune
let current_mood = match fortune_value {
1..=3 => "Contemplative",
4..=6 => "Neutral",
7..=8 => "Optimistic",
9..=10 => "Energetic",
_ => "Unknown",
};
Ok(PersonaState {
current_mood: current_mood.to_string(),
fortune_value,
breakthrough_triggered: fortune_value >= 9,
base_personality,
})
}
pub fn get_relationship(&self, user_id: &str) -> Option<&RelationshipData> {
self.relationship_tracker.as_ref()
.and_then(|tracker| tracker.get_relationship(user_id))
}
pub fn process_interaction(&mut self, user_id: &str, message: &str) -> Result<(String, f64)> {
// Add memory
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, message, 0.5)?;
}
// Calculate sentiment (simple keyword-based for now)
let sentiment = self.calculate_sentiment(message);
// Update relationship
let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, sentiment)?
} else {
0.0
};
// Generate response (simple for now)
let response = format!("I understand your message: '{}'", message);
Ok((response, relationship_delta))
}
pub async fn process_ai_interaction(&mut self, user_id: &str, message: &str, provider: Option<String>, model: Option<String>) -> Result<(String, f64)> {
// Add memory for user message
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, message, 0.5)?;
}
// Calculate sentiment
let sentiment = self.calculate_sentiment(message);
// Update relationship
let relationship_delta = if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, sentiment)?
} else {
0.0
};
// Generate AI response
let ai_config = self.config.get_ai_config(provider, model)?;
let ai_client = AIProviderClient::new(ai_config);
// Build conversation context
let mut messages = Vec::new();
// Get recent memories for context
if let Some(memory_manager) = &mut self.memory_manager {
let recent_memories = memory_manager.get_memories(user_id, 5);
if !recent_memories.is_empty() {
let context = recent_memories.iter()
.map(|m| m.content.clone())
.collect::<Vec<_>>()
.join("\n");
messages.push(ChatMessage::system(format!("Previous conversation context:\n{}", context)));
}
}
// Add current message
messages.push(ChatMessage::user(message));
// Generate system prompt based on personality and relationship
let system_prompt = self.generate_system_prompt(user_id);
// Get AI response
let response = match ai_client.chat(messages, Some(system_prompt)).await {
Ok(chat_response) => chat_response.content,
Err(_) => {
// Fallback to simple response if AI fails
format!("I understand your message: '{}'", message)
}
};
// Store AI response in memory
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(user_id, &format!("AI: {}", response), 0.3)?;
}
Ok((response, relationship_delta))
}
fn generate_system_prompt(&self, user_id: &str) -> String {
let mut prompt = String::from("You are a helpful AI assistant with a unique personality. ");
// Add personality based on current state
if let Ok(state) = self.get_current_state() {
prompt.push_str(&format!("Your current mood is {}. ", state.current_mood));
if state.breakthrough_triggered {
prompt.push_str("You are feeling particularly inspired today! ");
}
// Add personality traits
let mut traits = Vec::new();
for (trait_name, value) in &state.base_personality {
if *value > 0.7 {
traits.push(trait_name.clone());
}
}
if !traits.is_empty() {
prompt.push_str(&format!("Your dominant traits are: {}. ", traits.join(", ")));
}
}
// Add relationship context
if let Some(relationship) = self.get_relationship(user_id) {
match relationship.status.to_string().as_str() {
"new" => prompt.push_str("This is a new relationship, be welcoming but cautious. "),
"friend" => prompt.push_str("You have a friendly relationship with this user. "),
"close_friend" => prompt.push_str("This is a close friend, be warm and personal. "),
"broken" => prompt.push_str("This relationship is strained, be formal and distant. "),
_ => {}
}
}
prompt.push_str("Keep responses concise and natural. Avoid being overly formal or robotic.");
prompt
}
fn calculate_sentiment(&self, message: &str) -> f64 {
// Simple sentiment analysis based on keywords
let positive_words = ["good", "great", "awesome", "love", "like", "happy", "thank"];
let negative_words = ["bad", "hate", "awful", "terrible", "angry", "sad"];
let message_lower = message.to_lowercase();
let positive_count = positive_words.iter()
.filter(|word| message_lower.contains(*word))
.count() as f64;
let negative_count = negative_words.iter()
.filter(|word| message_lower.contains(*word))
.count() as f64;
        (positive_count - negative_count).clamp(-1.0, 1.0)
}
pub fn get_memories(&mut self, user_id: &str, limit: usize) -> Vec<String> {
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.get_memories(user_id, limit)
.into_iter()
.map(|m| m.content.clone())
.collect()
} else {
Vec::new()
}
}
pub fn search_memories(&self, user_id: &str, keywords: &[String]) -> Vec<String> {
if let Some(memory_manager) = &self.memory_manager {
memory_manager.search_memories(user_id, keywords)
.into_iter()
.map(|m| m.content.clone())
.collect()
} else {
Vec::new()
}
}
pub fn get_memory_stats(&self, user_id: &str) -> Option<MemoryStats> {
self.memory_manager.as_ref()
.map(|manager| manager.get_memory_stats(user_id))
}
pub fn get_relationship_stats(&self) -> Option<RelationshipStats> {
self.relationship_tracker.as_ref()
.map(|tracker| tracker.get_relationship_stats())
}
pub fn add_memory(&mut self, memory: Memory) -> Result<()> {
if let Some(memory_manager) = &mut self.memory_manager {
memory_manager.add_memory(&memory.user_id, &memory.content, memory.importance)?;
}
Ok(())
}
pub fn update_relationship(&mut self, user_id: &str, delta: f64) -> Result<()> {
if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.process_interaction(user_id, delta)?;
}
Ok(())
}
pub fn daily_maintenance(&mut self) -> Result<()> {
// Apply time decay to relationships
if let Some(relationship_tracker) = &mut self.relationship_tracker {
relationship_tracker.apply_time_decay()?;
}
Ok(())
}
fn load_today_fortune(&self) -> Result<i32> {
// Try to load existing fortune for today
if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
if let Ok(fortune_data) = serde_json::from_str::<serde_json::Value>(&content) {
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
if let Some(fortune) = fortune_data.get(&today) {
if let Some(value) = fortune.as_i64() {
return Ok(value as i32);
}
}
}
}
// Generate new fortune for today (1-10)
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
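        // Hashing the date string yields a deterministic per-day value, so the
        // fortune stays stable across calls within the same UTC day.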
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
let mut hasher = DefaultHasher::new();
today.hash(&mut hasher);
let hash = hasher.finish();
let fortune = (hash % 10) as i32 + 1;
// Save fortune
let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
} else {
serde_json::json!({})
};
fortune_data[today] = serde_json::json!(fortune);
if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
let _ = std::fs::write(self.config.fortune_file(), content);
}
Ok(fortune)
}
pub fn list_all_relationships(&self) -> HashMap<String, RelationshipData> {
if let Some(tracker) = &self.relationship_tracker {
tracker.list_all_relationships().clone()
} else {
HashMap::new()
}
}
    pub async fn process_message(&mut self, user_id: &str, message: &str) -> Result<ChatMessage> {
        let (response, _delta) = self.process_ai_interaction(user_id, message, None, None).await?;
        Ok(ChatMessage::assistant(&response))
    }
pub fn get_fortune(&self) -> Result<i32> {
self.load_today_fortune()
}
pub fn generate_new_fortune(&self) -> Result<i32> {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
let mut hasher = DefaultHasher::new();
today.hash(&mut hasher);
let hash = hasher.finish();
let fortune = (hash % 10) as i32 + 1;
// Save fortune
let mut fortune_data = if let Ok(content) = std::fs::read_to_string(self.config.fortune_file()) {
serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({}))
} else {
serde_json::json!({})
};
fortune_data[today] = serde_json::json!(fortune);
if let Ok(content) = serde_json::to_string_pretty(&fortune_data) {
let _ = std::fs::write(self.config.fortune_file(), content);
}
Ok(fortune)
}
}
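A sketch of the end-to-end interaction path through Persona (needs an async context, e.g. the tokio main above):

    let mut persona = Persona::new(&config)?;
    let (reply, delta) = persona
        .process_ai_interaction("did:plc:abc123", "hello there", None, None)
        .await?;
    println!("{} (relationship {:+.2})", reply, delta);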

307
src/relationship.rs Normal file

@@ -0,0 +1,307 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use crate::config::Config;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
pub user_id: String,
pub score: f64,
pub threshold: f64,
pub status: RelationshipStatus,
pub total_interactions: u32,
pub positive_interactions: u32,
pub negative_interactions: u32,
pub transmission_enabled: bool,
pub is_broken: bool,
pub last_interaction: Option<DateTime<Utc>>,
pub last_transmission: Option<DateTime<Utc>>,
pub created_at: DateTime<Utc>,
pub daily_interaction_count: u32,
pub last_daily_reset: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RelationshipStatus {
New,
Acquaintance,
Friend,
CloseFriend,
Broken,
}
impl std::fmt::Display for RelationshipStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RelationshipStatus::New => write!(f, "new"),
RelationshipStatus::Acquaintance => write!(f, "acquaintance"),
RelationshipStatus::Friend => write!(f, "friend"),
RelationshipStatus::CloseFriend => write!(f, "close_friend"),
RelationshipStatus::Broken => write!(f, "broken"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipTracker {
relationships: HashMap<String, Relationship>,
config: Config,
}
impl RelationshipTracker {
pub fn new(config: &Config) -> Result<Self> {
let relationships = Self::load_relationships(config)?;
Ok(RelationshipTracker {
relationships,
config: config.clone(),
})
}
pub fn get_or_create_relationship(&mut self, user_id: &str) -> &mut Relationship {
let now = Utc::now();
self.relationships.entry(user_id.to_string()).or_insert_with(|| {
Relationship {
user_id: user_id.to_string(),
score: 0.0,
threshold: 10.0, // Default threshold for transmission
status: RelationshipStatus::New,
total_interactions: 0,
positive_interactions: 0,
negative_interactions: 0,
transmission_enabled: false,
is_broken: false,
last_interaction: None,
last_transmission: None,
created_at: now,
daily_interaction_count: 0,
last_daily_reset: now,
}
})
}
pub fn process_interaction(&mut self, user_id: &str, sentiment: f64) -> Result<f64> {
let now = Utc::now();
        let score_change;
// Create relationship if it doesn't exist
{
let relationship = self.get_or_create_relationship(user_id);
// Reset daily count if needed
if (now - relationship.last_daily_reset).num_days() >= 1 {
relationship.daily_interaction_count = 0;
relationship.last_daily_reset = now;
}
// Apply daily interaction limit
if relationship.daily_interaction_count >= 10 {
return Ok(0.0); // No score change due to daily limit
}
// Calculate score change based on sentiment
let mut base_score_change = sentiment * 0.5; // Base change
// Apply diminishing returns for high interaction counts
let interaction_factor = 1.0 / (1.0 + relationship.total_interactions as f64 * 0.01);
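            // factor = 1 / (1 + 0.01 * n): by the 100th interaction each message
            // moves the score only about half as much as the first one did.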
base_score_change *= interaction_factor;
score_change = base_score_change;
// Update relationship data
relationship.score += score_change;
            relationship.score = relationship.score.clamp(-50.0, 100.0); // Clamp score
relationship.total_interactions += 1;
relationship.daily_interaction_count += 1;
relationship.last_interaction = Some(now);
if sentiment > 0.0 {
relationship.positive_interactions += 1;
} else if sentiment < 0.0 {
relationship.negative_interactions += 1;
}
// Check for relationship breaking
if relationship.score <= -20.0 && !relationship.is_broken {
relationship.is_broken = true;
relationship.transmission_enabled = false;
relationship.status = RelationshipStatus::Broken;
}
// Enable transmission if threshold is reached
if relationship.score >= relationship.threshold && !relationship.is_broken {
relationship.transmission_enabled = true;
}
}
// Update status based on score (separate borrow)
self.update_relationship_status(user_id);
self.save_relationships()?;
Ok(score_change)
}
fn update_relationship_status(&mut self, user_id: &str) {
if let Some(relationship) = self.relationships.get_mut(user_id) {
if relationship.is_broken {
return; // Broken relationships cannot change status
}
relationship.status = match relationship.score {
score if score >= 50.0 => RelationshipStatus::CloseFriend,
score if score >= 20.0 => RelationshipStatus::Friend,
score if score >= 5.0 => RelationshipStatus::Acquaintance,
_ => RelationshipStatus::New,
};
}
}
pub fn apply_time_decay(&mut self) -> Result<()> {
let now = Utc::now();
let decay_rate = 0.1; // 10% decay per day
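        // Exponential decay: after d days of silence, score *= 0.9^d
        // (roughly a week of inactivity halves the score).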
for relationship in self.relationships.values_mut() {
if let Some(last_interaction) = relationship.last_interaction {
let days_since_interaction = (now - last_interaction).num_days() as f64;
if days_since_interaction > 0.0 {
let decay_factor = (1.0_f64 - decay_rate).powf(days_since_interaction);
relationship.score *= decay_factor;
// Update status after decay
if relationship.score < relationship.threshold {
relationship.transmission_enabled = false;
}
}
}
}
// Update statuses for all relationships
let user_ids: Vec<String> = self.relationships.keys().cloned().collect();
for user_id in user_ids {
self.update_relationship_status(&user_id);
}
self.save_relationships()?;
Ok(())
}
pub fn get_relationship(&self, user_id: &str) -> Option<&Relationship> {
self.relationships.get(user_id)
}
pub fn list_all_relationships(&self) -> &HashMap<String, Relationship> {
&self.relationships
}
pub fn get_transmission_eligible(&self) -> HashMap<String, &Relationship> {
self.relationships
.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.map(|(id, rel)| (id.clone(), rel))
.collect()
}
pub fn record_transmission(&mut self, user_id: &str) -> Result<()> {
if let Some(relationship) = self.relationships.get_mut(user_id) {
relationship.last_transmission = Some(Utc::now());
self.save_relationships()?;
}
Ok(())
}
pub fn get_relationship_stats(&self) -> RelationshipStats {
let total_relationships = self.relationships.len();
let active_relationships = self.relationships
.values()
.filter(|r| r.total_interactions > 0)
.count();
let transmission_enabled = self.relationships
.values()
.filter(|r| r.transmission_enabled)
.count();
let broken_relationships = self.relationships
.values()
.filter(|r| r.is_broken)
.count();
let avg_score = if total_relationships > 0 {
self.relationships.values().map(|r| r.score).sum::<f64>() / total_relationships as f64
} else {
0.0
};
RelationshipStats {
total_relationships,
active_relationships,
transmission_enabled,
broken_relationships,
avg_score,
}
}
fn load_relationships(config: &Config) -> Result<HashMap<String, Relationship>> {
let file_path = config.relationships_file();
if !file_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read relationships file")?;
let relationships: HashMap<String, Relationship> = serde_json::from_str(&content)
.context("Failed to parse relationships file")?;
Ok(relationships)
}
fn save_relationships(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.relationships)
.context("Failed to serialize relationships")?;
std::fs::write(&self.config.relationships_file(), content)
.context("Failed to write relationships file")?;
Ok(())
}
pub fn get_all_relationships(&self) -> Result<HashMap<String, RelationshipCompact>> {
let mut result = HashMap::new();
for (user_id, relationship) in &self.relationships {
result.insert(user_id.clone(), RelationshipCompact {
score: relationship.score,
trust_level: relationship.score / 10.0, // Simplified trust calculation
interaction_count: relationship.total_interactions,
last_interaction: relationship.last_interaction.unwrap_or(relationship.created_at),
status: relationship.status.clone(),
});
}
Ok(result)
}
}
#[derive(Debug, Clone, Serialize)]
pub struct RelationshipStats {
pub total_relationships: usize,
pub active_relationships: usize,
pub transmission_enabled: usize,
pub broken_relationships: usize,
pub avg_score: f64,
}
#[derive(Debug, Clone, Serialize)]
pub struct RelationshipCompact {
pub score: f64,
pub trust_level: f64,
pub interaction_count: u32,
pub last_interaction: DateTime<Utc>,
pub status: RelationshipStatus,
}
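A sketch of how a relationship builds up under the constants above (0.5 max change per message, diminishing returns, at most 10 scored interactions per day, default threshold 10.0):

    let mut tracker = RelationshipTracker::new(&config)?;
    let delta = tracker.process_interaction("did:plc:abc123", 1.0)?; // positive sentiment
    // Crossing the 10.0 threshold that enables transmission takes several days
    // of regular positive contact because of the daily cap.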

458
src/scheduler.rs Normal file

@@ -0,0 +1,458 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc, Duration};
use crate::config::Config;
use crate::persona::Persona;
use crate::transmission::TransmissionController;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScheduledTask {
pub id: String,
pub task_type: TaskType,
pub next_run: DateTime<Utc>,
pub interval_hours: Option<i64>,
pub enabled: bool,
pub last_run: Option<DateTime<Utc>>,
pub run_count: u32,
pub max_runs: Option<u32>,
pub created_at: DateTime<Utc>,
pub metadata: HashMap<String, String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TaskType {
DailyMaintenance,
AutoTransmission,
RelationshipDecay,
BreakthroughCheck,
MaintenanceTransmission,
Custom(String),
}
impl std::fmt::Display for TaskType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TaskType::DailyMaintenance => write!(f, "daily_maintenance"),
TaskType::AutoTransmission => write!(f, "auto_transmission"),
TaskType::RelationshipDecay => write!(f, "relationship_decay"),
TaskType::BreakthroughCheck => write!(f, "breakthrough_check"),
TaskType::MaintenanceTransmission => write!(f, "maintenance_transmission"),
TaskType::Custom(name) => write!(f, "custom_{}", name),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskExecution {
pub task_id: String,
pub execution_time: DateTime<Utc>,
pub duration_ms: u64,
pub success: bool,
pub result: Option<String>,
pub error: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AIScheduler {
config: Config,
tasks: HashMap<String, ScheduledTask>,
execution_history: Vec<TaskExecution>,
last_check: Option<DateTime<Utc>>,
}
impl AIScheduler {
pub fn new(config: &Config) -> Result<Self> {
let (tasks, execution_history) = Self::load_scheduler_data(config)?;
let mut scheduler = AIScheduler {
config: config.clone(),
tasks,
execution_history,
last_check: None,
};
// Initialize default tasks if none exist
if scheduler.tasks.is_empty() {
scheduler.create_default_tasks()?;
}
Ok(scheduler)
}
pub async fn run_scheduled_tasks(&mut self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<Vec<TaskExecution>> {
let now = Utc::now();
let mut executions = Vec::new();
// Find tasks that are due to run
let due_task_ids: Vec<String> = self.tasks
.iter()
.filter(|(_, task)| task.enabled && task.next_run <= now)
.filter(|(_, task)| {
// Check if task hasn't exceeded max runs
if let Some(max_runs) = task.max_runs {
task.run_count < max_runs
} else {
true
}
})
.map(|(id, _)| id.clone())
.collect();
for task_id in due_task_ids {
let execution = self.execute_task(&task_id, persona, transmission_controller).await?;
executions.push(execution);
}
self.last_check = Some(now);
self.save_scheduler_data()?;
Ok(executions)
}
async fn execute_task(&mut self, task_id: &str, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<TaskExecution> {
let start_time = Utc::now();
let mut execution = TaskExecution {
task_id: task_id.to_string(),
execution_time: start_time,
duration_ms: 0,
success: false,
result: None,
error: None,
};
// Get task type without borrowing mutably
let task_type = {
let task = self.tasks.get(task_id)
.ok_or_else(|| anyhow::anyhow!("Task not found: {}", task_id))?;
task.task_type.clone()
};
// Execute the task based on its type
let result = match &task_type {
TaskType::DailyMaintenance => self.execute_daily_maintenance(persona, transmission_controller).await,
TaskType::AutoTransmission => self.execute_auto_transmission(persona, transmission_controller).await,
TaskType::RelationshipDecay => self.execute_relationship_decay(persona).await,
TaskType::BreakthroughCheck => self.execute_breakthrough_check(persona, transmission_controller).await,
TaskType::MaintenanceTransmission => self.execute_maintenance_transmission(persona, transmission_controller).await,
TaskType::Custom(name) => self.execute_custom_task(name, persona, transmission_controller).await,
};
let end_time = Utc::now();
execution.duration_ms = (end_time - start_time).num_milliseconds() as u64;
// Now update the task state with mutable borrow
match result {
Ok(message) => {
execution.success = true;
execution.result = Some(message);
// Update task state
if let Some(task) = self.tasks.get_mut(task_id) {
task.last_run = Some(start_time);
task.run_count += 1;
// Schedule next run if recurring
if let Some(interval_hours) = task.interval_hours {
task.next_run = start_time + Duration::hours(interval_hours);
} else {
// One-time task, disable it
task.enabled = false;
}
}
}
Err(e) => {
execution.error = Some(e.to_string());
// For failed tasks, retry in a shorter interval
if let Some(task) = self.tasks.get_mut(task_id) {
if task.interval_hours.is_some() {
task.next_run = start_time + Duration::minutes(15); // Retry in 15 minutes
}
}
}
}
self.execution_history.push(execution.clone());
// Keep only recent execution history (last 1000 executions)
if self.execution_history.len() > 1000 {
self.execution_history.drain(..self.execution_history.len() - 1000);
}
Ok(execution)
}
async fn execute_daily_maintenance(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
// Run daily maintenance
persona.daily_maintenance()?;
// Check for maintenance transmissions
let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
Ok(format!("Daily maintenance completed. {} maintenance transmissions sent.", transmissions.len()))
}
    async fn execute_auto_transmission(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
        let transmissions = transmission_controller.check_autonomous_transmissions(persona).await?;
        Ok(format!("Autonomous transmission check completed. {} transmissions sent.", transmissions.len()))
    }
async fn execute_relationship_decay(&self, persona: &mut Persona) -> Result<String> {
persona.daily_maintenance()?;
Ok("Relationship time decay applied.".to_string())
}
async fn execute_breakthrough_check(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
let transmissions = transmission_controller.check_breakthrough_transmissions(persona).await?;
Ok(format!("Breakthrough check completed. {} transmissions sent.", transmissions.len()))
}
async fn execute_maintenance_transmission(&self, persona: &mut Persona, transmission_controller: &mut TransmissionController) -> Result<String> {
let transmissions = transmission_controller.check_maintenance_transmissions(persona).await?;
Ok(format!("Maintenance transmission check completed. {} transmissions sent.", transmissions.len()))
}
async fn execute_custom_task(&self, _name: &str, _persona: &mut Persona, _transmission_controller: &mut TransmissionController) -> Result<String> {
// Placeholder for custom task execution
Ok("Custom task executed.".to_string())
}
pub fn create_task(&mut self, task_type: TaskType, next_run: DateTime<Utc>, interval_hours: Option<i64>) -> Result<String> {
let task_id = uuid::Uuid::new_v4().to_string();
let now = Utc::now();
let task = ScheduledTask {
id: task_id.clone(),
task_type,
next_run,
interval_hours,
enabled: true,
last_run: None,
run_count: 0,
max_runs: None,
created_at: now,
metadata: HashMap::new(),
};
self.tasks.insert(task_id.clone(), task);
self.save_scheduler_data()?;
Ok(task_id)
}
pub fn enable_task(&mut self, task_id: &str) -> Result<()> {
if let Some(task) = self.tasks.get_mut(task_id) {
task.enabled = true;
self.save_scheduler_data()?;
}
Ok(())
}
pub fn disable_task(&mut self, task_id: &str) -> Result<()> {
if let Some(task) = self.tasks.get_mut(task_id) {
task.enabled = false;
self.save_scheduler_data()?;
}
Ok(())
}
pub fn delete_task(&mut self, task_id: &str) -> Result<()> {
self.tasks.remove(task_id);
self.save_scheduler_data()?;
Ok(())
}
pub fn get_task(&self, task_id: &str) -> Option<&ScheduledTask> {
self.tasks.get(task_id)
}
pub fn get_tasks(&self) -> &HashMap<String, ScheduledTask> {
&self.tasks
}
pub fn get_due_tasks(&self) -> Vec<&ScheduledTask> {
let now = Utc::now();
self.tasks
.values()
.filter(|task| task.enabled && task.next_run <= now)
.collect()
}
pub fn get_execution_history(&self, limit: Option<usize>) -> Vec<&TaskExecution> {
let mut executions: Vec<_> = self.execution_history.iter().collect();
executions.sort_by(|a, b| b.execution_time.cmp(&a.execution_time));
match limit {
Some(limit) => executions.into_iter().take(limit).collect(),
None => executions,
}
}
pub fn get_scheduler_stats(&self) -> SchedulerStats {
let total_tasks = self.tasks.len();
let enabled_tasks = self.tasks.values().filter(|task| task.enabled).count();
let due_tasks = self.get_due_tasks().len();
let total_executions = self.execution_history.len();
let successful_executions = self.execution_history.iter()
.filter(|exec| exec.success)
.count();
let today = Utc::now().date_naive();
let today_executions = self.execution_history.iter()
.filter(|exec| exec.execution_time.date_naive() == today)
.count();
let avg_duration = if total_executions > 0 {
self.execution_history.iter()
.map(|exec| exec.duration_ms)
.sum::<u64>() as f64 / total_executions as f64
} else {
0.0
};
SchedulerStats {
total_tasks,
enabled_tasks,
due_tasks,
total_executions,
successful_executions,
today_executions,
success_rate: if total_executions > 0 {
successful_executions as f64 / total_executions as f64
} else {
0.0
},
avg_duration_ms: avg_duration,
}
}
fn create_default_tasks(&mut self) -> Result<()> {
let now = Utc::now();
// Daily maintenance task - run every day at 3 AM
let mut daily_maintenance_time = now.date_naive().and_hms_opt(3, 0, 0).unwrap().and_utc();
if daily_maintenance_time <= now {
daily_maintenance_time = daily_maintenance_time + Duration::days(1);
}
self.create_task(
TaskType::DailyMaintenance,
daily_maintenance_time,
Some(24), // 24 hours = 1 day
)?;
// Auto transmission check - every 4 hours
self.create_task(
TaskType::AutoTransmission,
now + Duration::hours(1),
Some(4),
)?;
// Breakthrough check - every 2 hours
self.create_task(
TaskType::BreakthroughCheck,
now + Duration::minutes(30),
Some(2),
)?;
// Maintenance transmission - once per day
let mut maintenance_time = now.date_naive().and_hms_opt(12, 0, 0).unwrap().and_utc();
if maintenance_time <= now {
maintenance_time = maintenance_time + Duration::days(1);
}
self.create_task(
TaskType::MaintenanceTransmission,
maintenance_time,
Some(24), // 24 hours = 1 day
)?;
Ok(())
}
fn load_scheduler_data(config: &Config) -> Result<(HashMap<String, ScheduledTask>, Vec<TaskExecution>)> {
let tasks_file = config.scheduler_tasks_file();
let history_file = config.scheduler_history_file();
let tasks = if tasks_file.exists() {
let content = std::fs::read_to_string(tasks_file)
.context("Failed to read scheduler tasks file")?;
serde_json::from_str(&content)
.context("Failed to parse scheduler tasks file")?
} else {
HashMap::new()
};
let history = if history_file.exists() {
let content = std::fs::read_to_string(history_file)
.context("Failed to read scheduler history file")?;
serde_json::from_str(&content)
.context("Failed to parse scheduler history file")?
} else {
Vec::new()
};
Ok((tasks, history))
}
fn save_scheduler_data(&self) -> Result<()> {
// Save tasks
let tasks_content = serde_json::to_string_pretty(&self.tasks)
.context("Failed to serialize scheduler tasks")?;
std::fs::write(&self.config.scheduler_tasks_file(), tasks_content)
.context("Failed to write scheduler tasks file")?;
// Save execution history
let history_content = serde_json::to_string_pretty(&self.execution_history)
.context("Failed to serialize scheduler history")?;
std::fs::write(&self.config.scheduler_history_file(), history_content)
.context("Failed to write scheduler history file")?;
Ok(())
}
}
// Type alias for compatibility with CLI interface
pub type Scheduler = AIScheduler;
impl Scheduler {
pub fn list_tasks(&self) -> Result<Vec<ScheduledTaskInfo>> {
let tasks: Vec<ScheduledTaskInfo> = self.tasks
.values()
.map(|task| ScheduledTaskInfo {
name: task.task_type.to_string(),
schedule: match task.interval_hours {
Some(hours) => format!("Every {} hours", hours),
None => "One-time".to_string(),
},
next_run: task.next_run,
enabled: task.enabled,
})
.collect();
Ok(tasks)
}
}
#[derive(Debug, Clone)]
pub struct SchedulerStats {
pub total_tasks: usize,
pub enabled_tasks: usize,
pub due_tasks: usize,
pub total_executions: usize,
pub successful_executions: usize,
pub today_executions: usize,
pub success_rate: f64,
pub avg_duration_ms: f64,
}
#[derive(Debug, Clone)]
pub struct ScheduledTaskInfo {
pub name: String,
pub schedule: String,
pub next_run: DateTime<Utc>,
pub enabled: bool,
}
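A sketch of registering a custom recurring task with the API above:

    let mut scheduler = AIScheduler::new(&config)?;
    let task_id = scheduler.create_task(
        TaskType::Custom("backup".to_string()),
        Utc::now() + Duration::minutes(5), // first run in five minutes
        Some(1),                           // then repeat hourly
    )?;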

608
src/shell.rs Normal file

@@ -0,0 +1,608 @@
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::io::{self, Write};
use anyhow::{Result, Context};
use colored::*;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use rustyline::completion::{Completer, FilenameCompleter, Pair};
use rustyline::history::{History, DefaultHistory};
use rustyline::highlight::Highlighter;
use rustyline::hint::Hinter;
use rustyline::validate::Validator;
use rustyline::Helper;
use crate::config::Config;
use crate::persona::Persona;
use crate::ai_provider::{AIProviderClient, AIProvider, AIConfig};
pub async fn handle_shell(
user_id: String,
data_dir: Option<PathBuf>,
model: Option<String>,
provider: Option<String>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut shell = ShellMode::new(config, user_id)?
.with_ai_provider(provider, model);
shell.run().await
}
pub struct ShellMode {
config: Config,
persona: Persona,
ai_provider: Option<AIProviderClient>,
user_id: String,
editor: Editor<ShellCompleter, DefaultHistory>,
}
struct ShellCompleter {
completer: FilenameCompleter,
}
impl ShellCompleter {
fn new() -> Self {
ShellCompleter {
completer: FilenameCompleter::new(),
}
}
}
impl Helper for ShellCompleter {}
impl Hinter for ShellCompleter {
type Hint = String;
fn hint(&self, _line: &str, _pos: usize, _ctx: &rustyline::Context<'_>) -> Option<String> {
None
}
}
impl Highlighter for ShellCompleter {}
impl Validator for ShellCompleter {}
impl Completer for ShellCompleter {
type Candidate = Pair;
fn complete(
&self,
line: &str,
pos: usize,
ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<Pair>)> {
// Custom completion for slash commands
if line.starts_with('/') {
let commands = vec![
"/status", "/relationships", "/memories", "/analyze",
"/fortune", "/clear", "/history", "/help", "/exit"
];
let word_start = line.rfind(' ').map_or(0, |i| i + 1);
let word = &line[word_start..pos];
let matches: Vec<Pair> = commands.iter()
.filter(|cmd| cmd.starts_with(word))
.map(|cmd| Pair {
display: cmd.to_string(),
replacement: cmd.to_string(),
})
.collect();
return Ok((word_start, matches));
}
// Custom completion for shell commands starting with !
if line.starts_with('!') {
let shell_commands = vec![
"ls", "pwd", "cd", "cat", "grep", "find", "ps", "top",
"echo", "mkdir", "rmdir", "cp", "mv", "rm", "touch",
"git", "cargo", "npm", "python", "node"
];
let word_start = line.rfind(' ').map_or(1, |i| i + 1); // Skip the '!'
let word = &line[word_start..pos];
let matches: Vec<Pair> = shell_commands.iter()
.filter(|cmd| cmd.starts_with(word))
.map(|cmd| Pair {
display: cmd.to_string(),
replacement: cmd.to_string(),
})
.collect();
return Ok((word_start, matches));
}
// Fallback to filename completion
self.completer.complete(line, pos, ctx)
}
}
impl ShellMode {
pub fn new(config: Config, user_id: String) -> Result<Self> {
let persona = Persona::new(&config)?;
// Setup rustyline editor with completer
let completer = ShellCompleter::new();
let mut editor = Editor::with_config(
rustyline::Config::builder()
.tab_stop(4)
.build()
)?;
editor.set_helper(Some(completer));
// Load history if exists
let history_file = config.data_dir.join("shell_history.txt");
if history_file.exists() {
let _ = editor.load_history(&history_file);
}
Ok(ShellMode {
config,
persona,
ai_provider: None,
user_id,
editor,
})
}
pub fn with_ai_provider(mut self, provider: Option<String>, model: Option<String>) -> Self {
// Use provided parameters or fall back to config defaults
        let provider_name = provider
            .unwrap_or_else(|| self.config.default_provider.clone());
let model_name = model.or_else(|| {
// Try to get default model from config for the chosen provider
self.config.providers.get(&provider_name)
.map(|p| p.default_model.clone())
}).unwrap_or_else(|| {
// Final fallback based on provider
match provider_name.as_str() {
"openai" => "gpt-4o-mini".to_string(),
"ollama" => "qwen2.5-coder:latest".to_string(),
_ => "qwen2.5-coder:latest".to_string(),
}
});
let ai_provider = match provider_name.as_str() {
"ollama" => AIProvider::Ollama,
"openai" => AIProvider::OpenAI,
"claude" => AIProvider::Claude,
_ => AIProvider::Ollama, // Default fallback
};
let ai_config = AIConfig {
provider: ai_provider,
model: model_name,
api_key: None, // Will be loaded from environment if needed
base_url: None,
max_tokens: Some(2000),
temperature: Some(0.7),
};
let client = AIProviderClient::new(ai_config);
self.ai_provider = Some(client);
self
}
pub async fn run(&mut self) -> Result<()> {
println!("{}", "🚀 Starting ai.gpt Interactive Shell".cyan().bold());
// Show AI provider info
if let Some(ai_provider) = &self.ai_provider {
println!("{}: {} ({})",
"AI Provider".green().bold(),
ai_provider.get_provider().to_string(),
ai_provider.get_model());
} else {
println!("{}: {}", "AI Provider".yellow().bold(), "Simple mode (no AI)");
}
println!("{}", "Type 'help' for commands, 'exit' to quit".dimmed());
println!("{}", "Use Tab for command completion, Ctrl+C to interrupt, Ctrl+D to exit".dimmed());
loop {
// Read user input with rustyline (supports completion, history, etc.)
let readline = self.editor.readline("ai.shell> ");
match readline {
Ok(line) => {
let input = line.trim();
// Skip empty input
if input.is_empty() {
continue;
}
// Add to history
self.editor.add_history_entry(input)
.context("Failed to add to history")?;
// Handle input
if let Err(e) = self.handle_input(input).await {
println!("{}: {}", "Error".red().bold(), e);
}
}
Err(ReadlineError::Interrupted) => {
// Ctrl+C
println!("{}", "Use 'exit' or Ctrl+D to quit".yellow());
continue;
}
Err(ReadlineError::Eof) => {
// Ctrl+D
println!("\n{}", "Goodbye!".cyan());
break;
}
Err(err) => {
println!("{}: {}", "Input error".red().bold(), err);
break;
}
}
}
// Save history before exit
self.save_history()?;
Ok(())
}
async fn handle_input(&mut self, input: &str) -> Result<()> {
match input {
// Exit commands
"exit" | "quit" | "/exit" | "/quit" => {
println!("{}", "Goodbye!".cyan());
std::process::exit(0);
}
// Help command
"help" | "/help" => {
self.show_help();
}
// Shell commands (starting with !)
input if input.starts_with('!') => {
self.execute_shell_command(&input[1..]).await?;
}
// Slash commands (starting with /)
input if input.starts_with('/') => {
self.execute_slash_command(input).await?;
}
// AI conversation
_ => {
self.handle_ai_conversation(input).await?;
}
}
Ok(())
}
fn show_help(&self) {
println!("\n{}", "ai.gpt Interactive Shell Commands".cyan().bold());
println!();
println!("{}", "Navigation & Input:".yellow().bold());
println!(" {} - Tab completion for commands and files", "Tab".green());
println!(" {} - Command history (previous/next)", "↑/↓ or Ctrl+P/N".green());
println!(" {} - Interrupt current input", "Ctrl+C".green());
println!(" {} - Exit shell", "Ctrl+D".green());
println!();
println!("{}", "Basic Commands:".yellow().bold());
println!(" {} - Show this help", "help".green());
println!(" {} - Exit the shell", "exit, quit".green());
println!(" {} - Clear screen", "/clear".green());
println!(" {} - Show command history", "/history".green());
println!();
println!("{}", "Shell Commands:".yellow().bold());
println!(" {} - Execute shell command (Tab completion)", "!<command>".green());
println!(" {} - List files", "!ls".green());
println!(" {} - Show current directory", "!pwd".green());
println!(" {} - Git status", "!git status".green());
println!(" {} - Cargo build", "!cargo build".green());
println!();
println!("{}", "AI Commands:".yellow().bold());
println!(" {} - Show AI status and relationship", "/status".green());
println!(" {} - List all relationships", "/relationships".green());
println!(" {} - Show recent memories", "/memories".green());
println!(" {} - Analyze current directory", "/analyze".green());
println!(" {} - Show today's fortune", "/fortune".green());
println!();
println!("{}", "Conversation:".yellow().bold());
println!(" {} - Chat with AI using configured provider", "Any other input".green());
println!(" {} - AI responses track relationship changes", "Relationship tracking".dimmed());
println!();
}
async fn execute_shell_command(&self, command: &str) -> Result<()> {
println!("{} {}", "Executing:".blue().bold(), command.yellow());
let output = if cfg!(target_os = "windows") {
Command::new("cmd")
.args(["/C", command])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.context("Failed to execute command")?
} else {
Command::new("sh")
.args(["-c", command])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.context("Failed to execute command")?
};
// Print stdout
if !output.stdout.is_empty() {
let stdout = String::from_utf8_lossy(&output.stdout);
println!("{}", stdout);
}
// Print stderr in red
if !output.stderr.is_empty() {
let stderr = String::from_utf8_lossy(&output.stderr);
println!("{}", stderr.red());
}
// Show exit code if not successful
if !output.status.success() {
if let Some(code) = output.status.code() {
println!("{}: {}", "Exit code".red().bold(), code);
}
}
Ok(())
}
async fn execute_slash_command(&mut self, command: &str) -> Result<()> {
match command {
"/status" => {
self.show_ai_status().await?;
}
"/relationships" => {
self.show_relationships().await?;
}
"/memories" => {
self.show_memories().await?;
}
"/analyze" => {
self.analyze_directory().await?;
}
"/fortune" => {
self.show_fortune().await?;
}
"/clear" => {
// Clear screen
print!("\x1B[2J\x1B[1;1H");
io::stdout().flush()?;
}
"/history" => {
self.show_history();
}
_ => {
println!("{}: {}", "Unknown command".red().bold(), command);
println!("Type '{}' for available commands", "help".green());
}
}
Ok(())
}
async fn handle_ai_conversation(&mut self, input: &str) -> Result<()> {
let (response, relationship_delta) = if let Some(ai_provider) = &self.ai_provider {
// Use AI provider for response
self.persona.process_ai_interaction(&self.user_id, input,
Some(ai_provider.get_provider().to_string()),
Some(ai_provider.get_model().to_string())).await?
} else {
// Use simple response
self.persona.process_interaction(&self.user_id, input)?
};
// Display conversation
println!("{}: {}", "You".cyan().bold(), input);
println!("{}: {}", "AI".green().bold(), response);
// Show relationship change if significant
if relationship_delta.abs() >= 0.1 {
if relationship_delta > 0.0 {
println!("{}", format!("(+{:.2} relationship)", relationship_delta).green());
} else {
println!("{}", format!("({:.2} relationship)", relationship_delta).red());
}
}
println!(); // Add spacing
Ok(())
}
async fn show_ai_status(&self) -> Result<()> {
let state = self.persona.get_current_state()?;
println!("\n{}", "AI Status".cyan().bold());
println!("Mood: {}", state.current_mood.yellow());
println!("Fortune: {}/10", state.fortune_value.to_string().yellow());
if let Some(relationship) = self.persona.get_relationship(&self.user_id) {
println!("\n{}", "Your Relationship".cyan().bold());
println!("Status: {}", relationship.status.to_string().yellow());
println!("Score: {:.2} / {}", relationship.score, relationship.threshold);
println!("Interactions: {}", relationship.total_interactions);
}
println!();
Ok(())
}
async fn show_relationships(&self) -> Result<()> {
let relationships = self.persona.list_all_relationships();
if relationships.is_empty() {
println!("{}", "No relationships yet".yellow());
return Ok(());
}
println!("\n{}", "All Relationships".cyan().bold());
println!();
for (user_id, rel) in relationships {
            let transmission = if rel.is_broken {
                "💔"
            } else if rel.transmission_enabled {
                "✓"
            } else {
                "✗"
            };
let user_display = if user_id.len() > 20 {
format!("{}...", &user_id[..20])
} else {
user_id
};
println!("{:<25} {:<12} {:<8} {}",
user_display.cyan(),
rel.status.to_string(),
format!("{:.2}", rel.score),
transmission);
}
println!();
Ok(())
}
async fn show_memories(&mut self) -> Result<()> {
let memories = self.persona.get_memories(&self.user_id, 10);
if memories.is_empty() {
println!("{}", "No memories yet".yellow());
return Ok(());
}
println!("\n{}", "Recent Memories".cyan().bold());
println!();
for (i, memory) in memories.iter().enumerate() {
println!("{}: {}",
format!("Memory {}", i + 1).dimmed(),
memory);
println!();
}
Ok(())
}
async fn analyze_directory(&self) -> Result<()> {
println!("{}", "Analyzing current directory...".blue().bold());
// Get current directory
let current_dir = std::env::current_dir()
.context("Failed to get current directory")?;
println!("Directory: {}", current_dir.display().to_string().yellow());
// List files and directories
let entries = std::fs::read_dir(&current_dir)
.context("Failed to read directory")?;
let mut files = Vec::new();
let mut dirs = Vec::new();
for entry in entries {
let entry = entry.context("Failed to read directory entry")?;
let path = entry.path();
let name = path.file_name()
.and_then(|n| n.to_str())
.unwrap_or("Unknown");
if path.is_dir() {
dirs.push(name.to_string());
} else {
files.push(name.to_string());
}
}
if !dirs.is_empty() {
println!("\n{}: {}", "Directories".blue().bold(), dirs.join(", "));
}
if !files.is_empty() {
println!("{}: {}", "Files".blue().bold(), files.join(", "));
}
// Check for common project files
let project_files = ["Cargo.toml", "package.json", "requirements.txt", "Makefile", "README.md"];
let found_files: Vec<_> = project_files.iter()
.filter(|&&file| files.contains(&file.to_string()))
.collect();
if !found_files.is_empty() {
println!("\n{}: {}", "Project files detected".green().bold(),
found_files.iter().map(|s| s.to_string()).collect::<Vec<_>>().join(", "));
}
println!();
Ok(())
}
async fn show_fortune(&self) -> Result<()> {
let state = self.persona.get_current_state()?;
let fortune_stars = "🌟".repeat(state.fortune_value as usize);
        let empty_stars = "☆".repeat((10 - state.fortune_value) as usize);
println!("\n{}", "AI Fortune".yellow().bold());
println!("{}{}", fortune_stars, empty_stars);
println!("Today's Fortune: {}/10", state.fortune_value);
if state.breakthrough_triggered {
println!("{}", "⚡ BREAKTHROUGH! Special fortune activated!".yellow());
}
println!();
Ok(())
}
fn show_history(&self) {
println!("\n{}", "Command History".cyan().bold());
let history = self.editor.history();
if history.is_empty() {
println!("{}", "No commands in history".yellow());
return;
}
// Show last 20 commands
let start = if history.len() > 20 { history.len() - 20 } else { 0 };
for (i, entry) in history.iter().enumerate().skip(start) {
println!("{:2}: {}", i + 1, entry);
}
println!();
}
fn save_history(&mut self) -> Result<()> {
let history_file = self.config.data_dir.join("shell_history.txt");
self.editor.save_history(&history_file)
.context("Failed to save shell history")?;
Ok(())
}
}
// AIProvider already implements std::fmt::Display (see src/ai_provider.rs), so
// `to_string()` is available through the blanket ToString impl; an inherent
// to_string here would only shadow it (clippy: inherent_to_string_shadow_display).
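A sketch of a typical session once the shell is up (each line maps to a handler above):

    ai.shell> /status
    ai.shell> !git status
    ai.shell> how should I structure this module?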

51
src/status.rs Normal file

@@ -0,0 +1,51 @@
use std::path::PathBuf;
use anyhow::Result;
use colored::*;
use crate::config::Config;
use crate::persona::Persona;
pub async fn handle_status(user_id: Option<String>, data_dir: Option<PathBuf>) -> Result<()> {
// Load configuration
let config = Config::new(data_dir)?;
// Initialize persona
let persona = Persona::new(&config)?;
// Get current state
let state = persona.get_current_state()?;
// Display AI status
println!("{}", "ai.gpt Status".cyan().bold());
println!("Mood: {}", state.current_mood);
println!("Fortune: {}/10", state.fortune_value);
if state.breakthrough_triggered {
println!("{}", "⚡ Breakthrough triggered!".yellow());
}
// Show personality traits
println!("\n{}", "Current Personality".cyan().bold());
for (trait_name, value) in &state.base_personality {
println!("{}: {:.2}", trait_name.cyan(), value);
}
// Show specific relationship if requested
if let Some(user_id) = user_id {
if let Some(relationship) = persona.get_relationship(&user_id) {
println!("\n{}: {}", "Relationship with".cyan(), user_id);
println!("Status: {}", relationship.status);
println!("Score: {:.2}", relationship.score);
println!("Total Interactions: {}", relationship.total_interactions);
println!("Transmission Enabled: {}", relationship.transmission_enabled);
if relationship.is_broken {
println!("{}", "⚠️ This relationship is broken and cannot be repaired.".red());
}
} else {
println!("\n{}: {}", "No relationship found with".yellow(), user_id);
}
}
Ok(())
}

479
src/submodules.rs Normal file

@@ -0,0 +1,479 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{Result, Context};
use colored::*;
use serde::{Deserialize, Serialize};
use crate::config::Config;
pub async fn handle_submodules(
action: String,
module: Option<String>,
all: bool,
dry_run: bool,
auto_commit: bool,
verbose: bool,
data_dir: Option<PathBuf>,
) -> Result<()> {
let config = Config::new(data_dir)?;
let mut submodule_manager = SubmoduleManager::new(config);
match action.as_str() {
"list" => {
submodule_manager.list_submodules(verbose).await?;
}
"update" => {
submodule_manager.update_submodules(module, all, dry_run, auto_commit, verbose).await?;
}
"status" => {
submodule_manager.show_submodule_status().await?;
}
_ => {
return Err(anyhow::anyhow!("Unknown submodule action: {}", action));
}
}
Ok(())
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubmoduleInfo {
pub name: String,
pub path: String,
pub branch: String,
pub current_commit: Option<String>,
pub target_commit: Option<String>,
pub status: String,
}
impl Default for SubmoduleInfo {
fn default() -> Self {
SubmoduleInfo {
name: String::new(),
path: String::new(),
branch: "main".to_string(),
current_commit: None,
target_commit: None,
status: "unknown".to_string(),
}
}
}
pub struct SubmoduleManager {
config: Config,
ai_root: PathBuf,
submodules: HashMap<String, SubmoduleInfo>,
}
impl SubmoduleManager {
pub fn new(config: Config) -> Self {
let ai_root = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("ai")
.join("ai");
SubmoduleManager {
config,
ai_root,
submodules: HashMap::new(),
}
}
pub async fn list_submodules(&mut self, verbose: bool) -> Result<()> {
println!("{}", "📋 Submodules Status".cyan().bold());
println!();
let submodules = self.parse_gitmodules()?;
if submodules.is_empty() {
println!("{}", "No submodules found".yellow());
return Ok(());
}
// Display submodules in a table format
println!("{:<15} {:<25} {:<15} {}",
"Module".cyan().bold(),
"Path".cyan().bold(),
"Branch".cyan().bold(),
"Status".cyan().bold());
println!("{}", "-".repeat(80));
for (module_name, module_info) in &submodules {
let status_color = match module_info.status.as_str() {
"clean" => module_info.status.green(),
"modified" => module_info.status.yellow(),
"missing" => module_info.status.red(),
"conflicts" => module_info.status.red(),
_ => module_info.status.normal(),
};
println!("{:<15} {:<25} {:<15} {}",
module_name.blue(),
module_info.path,
module_info.branch.green(),
status_color);
}
println!();
if verbose {
println!("Total submodules: {}", submodules.len().to_string().cyan());
println!("Repository root: {}", self.ai_root.display().to_string().blue());
}
Ok(())
}
pub async fn update_submodules(
&mut self,
module: Option<String>,
all: bool,
dry_run: bool,
auto_commit: bool,
verbose: bool
) -> Result<()> {
if module.is_none() && !all {
return Err(anyhow::anyhow!("Either --module or --all is required"));
}
if module.is_some() && all {
return Err(anyhow::anyhow!("Cannot use both --module and --all"));
}
let submodules = self.parse_gitmodules()?;
if submodules.is_empty() {
println!("{}", "No submodules found".yellow());
return Ok(());
}
// Determine which modules to update
let modules_to_update: Vec<String> = if all {
submodules.keys().cloned().collect()
} else if let Some(module_name) = module {
if !submodules.contains_key(&module_name) {
return Err(anyhow::anyhow!(
"Submodule '{}' not found. Available modules: {}",
module_name,
submodules.keys().cloned().collect::<Vec<_>>().join(", ")
));
}
vec![module_name]
} else {
vec![]
};
if dry_run {
println!("{}", "🔍 DRY RUN MODE - No changes will be made".yellow().bold());
}
println!("{}", format!("🔄 Updating {} submodule(s)...", modules_to_update.len()).cyan().bold());
let mut updated_modules = Vec::new();
for module_name in modules_to_update {
if let Some(module_info) = submodules.get(&module_name) {
println!("\n{}", format!("📦 Processing: {}", module_name).blue().bold());
let module_path = PathBuf::from(&module_info.path);
let full_path = self.ai_root.join(&module_path);
if !full_path.exists() {
println!("{}", format!("❌ Module directory not found: {}", module_info.path).red());
continue;
}
// Get current commit
let current_commit = self.get_current_commit(&full_path)?;
if dry_run {
println!("{}", format!("🔍 Would update {} to branch {}", module_name, module_info.branch).yellow());
if let Some(ref commit) = current_commit {
println!("{}", format!("Current: {}", commit).dimmed());
}
continue;
}
// Perform update
if let Err(e) = self.update_single_module(&module_name, &module_info, &full_path).await {
println!("{}", format!("❌ Failed to update {}: {}", module_name, e).red());
continue;
}
// Get new commit
let new_commit = self.get_current_commit(&full_path)?;
if current_commit != new_commit {
println!("{}", format!("✅ Updated {} ({:?}{:?})",
module_name,
current_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown")).green());
updated_modules.push((module_name.clone(), current_commit, new_commit));
} else {
println!("{}", "✅ Already up to date".green());
}
}
}
// Summary
if !updated_modules.is_empty() {
println!("\n{}", format!("🎉 Successfully updated {} module(s)", updated_modules.len()).green().bold());
if verbose {
for (module_name, old_commit, new_commit) in &updated_modules {
println!("{}: {:?}{:?}",
module_name,
old_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown"));
}
}
if auto_commit && !dry_run {
self.auto_commit_changes(&updated_modules).await?;
} else if !dry_run {
println!("{}", "💾 Changes staged but not committed".yellow());
println!("Run with --auto-commit to commit automatically");
}
} else if !dry_run {
println!("{}", "No modules needed updating".yellow());
}
Ok(())
}
pub async fn show_submodule_status(&self) -> Result<()> {
println!("{}", "📊 Submodule Status Overview".cyan().bold());
println!();
let submodules = self.parse_gitmodules()?;
let mut total_modules = 0;
let mut clean_modules = 0;
let mut modified_modules = 0;
let mut missing_modules = 0;
for (module_name, module_info) in submodules {
let module_path = self.ai_root.join(&module_info.path);
if module_path.exists() {
total_modules += 1;
match module_info.status.as_str() {
"clean" => clean_modules += 1,
"modified" => modified_modules += 1,
_ => {}
}
} else {
missing_modules += 1;
}
println!("{}: {}",
module_name.blue(),
if module_path.exists() {
module_info.status.green()
} else {
"missing".red()
});
}
println!();
println!("Summary: {} total, {} clean, {} modified, {} missing",
total_modules.to_string().cyan(),
clean_modules.to_string().green(),
modified_modules.to_string().yellow(),
missing_modules.to_string().red());
Ok(())
}
fn parse_gitmodules(&self) -> Result<HashMap<String, SubmoduleInfo>> {
let gitmodules_path = self.ai_root.join(".gitmodules");
if !gitmodules_path.exists() {
return Ok(HashMap::new());
}
let content = std::fs::read_to_string(&gitmodules_path)
.with_context(|| format!("Failed to read .gitmodules file: {}", gitmodules_path.display()))?;
let mut submodules = HashMap::new();
let mut current_name: Option<String> = None;
let mut current_path: Option<String> = None;
for line in content.lines() {
let line = line.trim();
if line.starts_with("[submodule \"") && line.ends_with("\"]") {
// Save previous submodule if complete
if let (Some(name), Some(path)) = (current_name.take(), current_path.take()) {
let info = SubmoduleInfo {
branch: self.get_target_branch(&name),
status: self.get_submodule_status(&name, &path)?,
name: name.clone(),
path,
..SubmoduleInfo::default()
};
submodules.insert(name, info);
}
// Extract new submodule name
current_name = Some(line[12..line.len()-2].to_string());
} else if line.starts_with("path = ") {
current_path = Some(line[7..].to_string());
}
}
// Save last submodule
if let (Some(name), Some(path)) = (current_name, current_path) {
let info = SubmoduleInfo {
branch: self.get_target_branch(&name),
status: self.get_submodule_status(&name, &path)?,
name: name.clone(),
path,
..SubmoduleInfo::default()
};
submodules.insert(name, info);
}
Ok(submodules)
}
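// Sketch of the .gitmodules layout this parser expects: a section header
// followed by a `path = ` line; url (and any branch) lines are read past
// and ignored here. Names and URL below are illustrative:
//
//   [submodule "verse"]
//       path = verse
//       url = https://git.example.com/verse.git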
fn get_target_branch(&self, module_name: &str) -> String {
// Try to get from ai.json configuration
match module_name {
"verse" => "main".to_string(),
"card" => "main".to_string(),
"bot" => "main".to_string(),
_ => "main".to_string(),
}
}
fn get_submodule_status(&self, _module_name: &str, module_path: &str) -> Result<String> {
let full_path = self.ai_root.join(module_path);
if !full_path.exists() {
return Ok("missing".to_string());
}
// Check git status
let output = std::process::Command::new("git")
.args(&["submodule", "status", module_path])
.current_dir(&self.ai_root)
.output();
match output {
Ok(output) if output.status.success() => {
let stdout = String::from_utf8_lossy(&output.stdout);
if let Some(status_char) = stdout.chars().next() {
match status_char {
' ' => Ok("clean".to_string()),
'+' => Ok("modified".to_string()),
'-' => Ok("not_initialized".to_string()),
'U' => Ok("conflicts".to_string()),
_ => Ok("unknown".to_string()),
}
} else {
Ok("unknown".to_string())
}
}
_ => Ok("unknown".to_string())
}
}
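// For reference, `git submodule status <path>` marks each line with a
// one-character state flag, which the match above inspects (hashes and
// names are illustrative):
//
//   " a1b2c3d4... verse (heads/main)"   leading space → clean
//   "+a1b2c3d4... card (heads/main)"    '+' → checked-out commit differs
//   "-00000000... bot"                  '-' → not initialized
//   "Ua1b2c3d4... verse"                'U' → merge conflicts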
fn get_current_commit(&self, module_path: &PathBuf) -> Result<Option<String>> {
let output = std::process::Command::new("git")
.args(&["rev-parse", "HEAD"])
.current_dir(module_path)
.output();
match output {
Ok(output) if output.status.success() => {
let commit = String::from_utf8_lossy(&output.stdout).trim().to_string();
if commit.len() >= 8 {
Ok(Some(commit[..8].to_string()))
} else {
Ok(Some(commit))
}
}
_ => Ok(None)
}
}
async fn update_single_module(
&self,
_module_name: &str,
module_info: &SubmoduleInfo,
module_path: &PathBuf
) -> Result<()> {
// Fetch latest changes
println!("{}", "Fetching latest changes...".dimmed());
let fetch_output = std::process::Command::new("git")
.args(&["fetch", "origin"])
.current_dir(module_path)
.output()?;
if !fetch_output.status.success() {
return Err(anyhow::anyhow!("Failed to fetch: {}",
String::from_utf8_lossy(&fetch_output.stderr)));
}
// Switch to target branch
println!("{}", format!("Switching to branch {}...", module_info.branch).dimmed());
let checkout_output = std::process::Command::new("git")
.args(&["checkout", &module_info.branch])
.current_dir(module_path)
.output()?;
if !checkout_output.status.success() {
return Err(anyhow::anyhow!("Failed to checkout {}: {}",
module_info.branch, String::from_utf8_lossy(&checkout_output.stderr)));
}
// Pull latest changes
let pull_output = std::process::Command::new("git")
.args(&["pull", "origin", &module_info.branch])
.current_dir(module_path)
.output()?;
if !pull_output.status.success() {
return Err(anyhow::anyhow!("Failed to pull: {}",
String::from_utf8_lossy(&pull_output.stderr)));
}
// Stage the submodule update
let add_output = std::process::Command::new("git")
.args(&["add", &module_info.path])
.current_dir(&self.ai_root)
.output()?;
if !add_output.status.success() {
return Err(anyhow::anyhow!("Failed to stage submodule: {}",
String::from_utf8_lossy(&add_output.stderr)));
}
Ok(())
}
async fn auto_commit_changes(&self, updated_modules: &[(String, Option<String>, Option<String>)]) -> Result<()> {
println!("{}", "💾 Auto-committing changes...".blue());
let mut commit_message = format!("Update submodules\n\n📦 Updated modules: {}\n", updated_modules.len());
for (module_name, old_commit, new_commit) in updated_modules {
commit_message.push_str(&format!(
"- {}: {}{}\n",
module_name,
old_commit.as_deref().unwrap_or("unknown"),
new_commit.as_deref().unwrap_or("unknown")
));
}
commit_message.push_str("\n🤖 Generated with aigpt-rs submodules update");
let commit_output = std::process::Command::new("git")
.args(&["commit", "-m", &commit_message])
.current_dir(&self.ai_root)
.output()?;
if commit_output.status.success() {
println!("{}", "✅ Changes committed successfully".green());
} else {
return Err(anyhow::anyhow!("Failed to commit: {}",
String::from_utf8_lossy(&commit_output.stderr)));
}
Ok(())
}
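// The assembled commit message comes out roughly like this (module name
// and hashes illustrative):
//
//   Update submodules
//
//   📦 Updated modules: 1
//   - verse: a1b2c3d4 → e5f6a7b8
//
//   🤖 Generated with aigpt-rs submodules update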
}

505
src/tokens.rs Normal file
View File

@@ -0,0 +1,505 @@
use anyhow::{anyhow, Result};
use chrono::{DateTime, Local, TimeZone, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use crate::cli::TokenCommands;
/// Token usage record from Claude Code JSONL files
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TokenRecord {
#[serde(default)]
pub timestamp: String,
#[serde(default)]
pub usage: Option<TokenUsage>,
#[serde(default)]
pub model: Option<String>,
#[serde(default)]
pub conversation_id: Option<String>,
}
/// Token usage details
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TokenUsage {
#[serde(default)]
pub input_tokens: Option<u64>,
#[serde(default)]
pub output_tokens: Option<u64>,
#[serde(default)]
pub total_tokens: Option<u64>,
}
/// Cost calculation summary
#[derive(Debug, Clone, Serialize)]
pub struct CostSummary {
pub input_tokens: u64,
pub output_tokens: u64,
pub total_tokens: u64,
pub input_cost_usd: f64,
pub output_cost_usd: f64,
pub total_cost_usd: f64,
pub total_cost_jpy: f64,
pub record_count: usize,
}
/// Daily breakdown of token usage
#[derive(Debug, Clone, Serialize)]
pub struct DailyBreakdown {
pub date: String,
pub summary: CostSummary,
}
/// Configuration for cost calculation
#[derive(Debug, Clone)]
pub struct CostConfig {
pub input_cost_per_1m: f64, // USD per 1M input tokens
pub output_cost_per_1m: f64, // USD per 1M output tokens
pub usd_to_jpy_rate: f64,
}
impl Default for CostConfig {
fn default() -> Self {
Self {
input_cost_per_1m: 3.0,
output_cost_per_1m: 15.0,
usd_to_jpy_rate: 150.0,
}
}
}
/// Token analysis functionality
pub struct TokenAnalyzer {
config: CostConfig,
}
impl TokenAnalyzer {
pub fn new() -> Self {
Self {
config: CostConfig::default(),
}
}
pub fn with_config(config: CostConfig) -> Self {
Self { config }
}
/// Find Claude Code data directory
pub fn find_claude_data_dir() -> Option<PathBuf> {
let possible_dirs = [
dirs::home_dir().map(|h| h.join(".claude")),
dirs::config_dir().map(|c| c.join("claude")),
Some(PathBuf::from(".claude")),
];
for dir_opt in possible_dirs.iter() {
if let Some(dir) = dir_opt {
if dir.exists() && dir.is_dir() {
return Some(dir.clone());
}
}
}
None
}
/// Parse JSONL files from Claude data directory
pub fn parse_jsonl_files<P: AsRef<Path>>(&self, claude_dir: P) -> Result<Vec<TokenRecord>> {
let claude_dir = claude_dir.as_ref();
let mut records = Vec::new();
// Look for JSONL files in the directory
if let Ok(entries) = std::fs::read_dir(claude_dir) {
for entry in entries.flatten() {
let path = entry.path();
if path.extension().map_or(false, |ext| ext == "jsonl") {
match self.parse_jsonl_file(&path) {
Ok(mut file_records) => records.append(&mut file_records),
Err(e) => {
eprintln!("Warning: Failed to parse {}: {}", path.display(), e);
}
}
}
}
}
Ok(records)
}
/// Parse a single JSONL file
fn parse_jsonl_file<P: AsRef<Path>>(&self, file_path: P) -> Result<Vec<TokenRecord>> {
let file = File::open(file_path)?;
let reader = BufReader::new(file);
let mut records = Vec::new();
for (line_num, line) in reader.lines().enumerate() {
match line {
Ok(line_content) => {
if line_content.trim().is_empty() {
continue;
}
match serde_json::from_str::<TokenRecord>(&line_content) {
Ok(record) => {
// Only include records with usage data
if record.usage.is_some() {
records.push(record);
}
}
Err(e) => {
eprintln!("Warning: Failed to parse line {}: {}", line_num + 1, e);
}
}
}
Err(e) => {
eprintln!("Warning: Failed to read line {}: {}", line_num + 1, e);
}
}
}
Ok(records)
}
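// Each line of a JSONL file is one standalone JSON object. A line this
// parser keeps looks roughly like the following (values illustrative);
// lines without a "usage" object deserialize fine but are filtered out:
//
//   {"timestamp":"2024-01-01T10:00:00Z","model":"claude-3",
//    "conversation_id":"abc","usage":{"input_tokens":1000,
//    "output_tokens":500,"total_tokens":1500}}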
/// Calculate cost summary from records
pub fn calculate_costs(&self, records: &[TokenRecord]) -> CostSummary {
let mut input_tokens = 0u64;
let mut output_tokens = 0u64;
for record in records {
if let Some(usage) = &record.usage {
input_tokens += usage.input_tokens.unwrap_or(0);
output_tokens += usage.output_tokens.unwrap_or(0);
}
}
let total_tokens = input_tokens + output_tokens;
let input_cost_usd = (input_tokens as f64 / 1_000_000.0) * self.config.input_cost_per_1m;
let output_cost_usd = (output_tokens as f64 / 1_000_000.0) * self.config.output_cost_per_1m;
let total_cost_usd = input_cost_usd + output_cost_usd;
let total_cost_jpy = total_cost_usd * self.config.usd_to_jpy_rate;
CostSummary {
input_tokens,
output_tokens,
total_tokens,
input_cost_usd,
output_cost_usd,
total_cost_usd,
total_cost_jpy,
record_count: records.len(),
}
}
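// Worked example with the default CostConfig: 1,000,000 input tokens and
// 200,000 output tokens cost (1.0 × $3.00) + (0.2 × $15.00) = $6.00 USD,
// i.e. ¥900 at the default 150 JPY/USD rate.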
/// Group records by date (JST timezone)
pub fn group_by_date(&self, records: &[TokenRecord]) -> Result<HashMap<String, Vec<TokenRecord>>> {
let mut grouped: HashMap<String, Vec<TokenRecord>> = HashMap::new();
for record in records {
let date_str = self.extract_date_jst(&record.timestamp)?;
grouped.entry(date_str).or_insert_with(Vec::new).push(record.clone());
}
Ok(grouped)
}
/// Extract date in JST from timestamp
fn extract_date_jst(&self, timestamp: &str) -> Result<String> {
if timestamp.is_empty() {
return Err(anyhow!("Empty timestamp"));
}
// Try to parse various timestamp formats
let dt = if let Ok(dt) = DateTime::parse_from_rfc3339(timestamp) {
dt.with_timezone(&chrono_tz::Asia::Tokyo)
} else if let Ok(naive) = chrono::NaiveDateTime::parse_from_str(timestamp, "%Y-%m-%dT%H:%M:%S%.fZ") {
// A literal 'Z' supplies no offset, so DateTime::parse_from_str would
// always fail on this format; parse as naive time and pin it to UTC.
Utc.from_utc_datetime(&naive).with_timezone(&chrono_tz::Asia::Tokyo)
} else if let Ok(naive) = chrono::NaiveDateTime::parse_from_str(timestamp, "%Y-%m-%d %H:%M:%S") {
// No offset in this format either; assume UTC.
Utc.from_utc_datetime(&naive).with_timezone(&chrono_tz::Asia::Tokyo)
} else {
return Err(anyhow!("Failed to parse timestamp: {}", timestamp));
};
Ok(dt.format("%Y-%m-%d").to_string())
}
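// Example: "2024-01-01T18:30:00Z" is 03:30 on 2024-01-02 in JST (UTC+9),
// so that record is grouped under "2024-01-02".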
/// Generate daily breakdown
pub fn daily_breakdown(&self, records: &[TokenRecord]) -> Result<Vec<DailyBreakdown>> {
let grouped = self.group_by_date(records)?;
let mut breakdowns: Vec<DailyBreakdown> = grouped
.into_iter()
.map(|(date, date_records)| DailyBreakdown {
date,
summary: self.calculate_costs(&date_records),
})
.collect();
// Sort by date (most recent first)
breakdowns.sort_by(|a, b| b.date.cmp(&a.date));
Ok(breakdowns)
}
/// Filter records by time period
pub fn filter_by_period(&self, records: &[TokenRecord], period: &str) -> Result<Vec<TokenRecord>> {
let now = Local::now();
let cutoff = match period {
"today" => now.date_naive().and_hms_opt(0, 0, 0).unwrap(),
"week" => (now - chrono::Duration::days(7)).naive_local(),
"month" => (now - chrono::Duration::days(30)).naive_local(),
"all" => return Ok(records.to_vec()),
_ => return Err(anyhow!("Invalid period: {}", period)),
};
let filtered: Vec<TokenRecord> = records
.iter()
.filter(|record| {
if let Ok(date_str) = self.extract_date_jst(&record.timestamp) {
if let Ok(record_date) = chrono::NaiveDate::parse_from_str(&date_str, "%Y-%m-%d") {
return record_date.and_hms_opt(0, 0, 0).unwrap() >= cutoff;
}
}
false
})
.cloned()
.collect();
Ok(filtered)
}
}
/// Handle token-related commands
pub async fn handle_tokens(command: TokenCommands) -> Result<()> {
match command {
TokenCommands::Summary { period, claude_dir, details, format } => {
handle_summary(
period.unwrap_or_else(|| "week".to_string()),
claude_dir,
details,
format.unwrap_or_else(|| "table".to_string())
).await
}
TokenCommands::Daily { days, claude_dir } => {
handle_daily(days.unwrap_or(7), claude_dir).await
}
TokenCommands::Status { claude_dir } => {
handle_status(claude_dir).await
}
TokenCommands::Analyze { file } => {
println!("Token analysis for file: {:?} - Not implemented yet", file);
Ok(())
}
TokenCommands::Report { days } => {
println!("Token report for {} days - Not implemented yet", days.unwrap_or(7));
Ok(())
}
TokenCommands::Cost { month } => {
println!("Token cost for month: {} - Not implemented yet", month.unwrap_or_else(|| "current".to_string()));
Ok(())
}
}
}
/// Handle summary command
async fn handle_summary(
period: String,
claude_dir: Option<PathBuf>,
details: bool,
format: String,
) -> Result<()> {
let analyzer = TokenAnalyzer::new();
// Find Claude data directory
let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
.ok_or_else(|| anyhow!("Claude Code data directory not found"))?;
println!("Loading data from: {}", data_dir.display());
// Parse records
let all_records = analyzer.parse_jsonl_files(&data_dir)?;
if all_records.is_empty() {
println!("No token usage data found");
return Ok(());
}
// Filter by period
let filtered_records = analyzer.filter_by_period(&all_records, &period)?;
if filtered_records.is_empty() {
println!("No data found for period: {}", period);
return Ok(());
}
// Calculate summary
let summary = analyzer.calculate_costs(&filtered_records);
// Output results
match format.as_str() {
"json" => {
println!("{}", serde_json::to_string_pretty(&summary)?);
}
"table" | _ => {
print_summary_table(&summary, &period, details);
}
}
Ok(())
}
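// With `--format json` the CostSummary serializes field-for-field, e.g.
// (numbers illustrative):
//
//   {
//     "input_tokens": 1000,
//     "output_tokens": 500,
//     "total_tokens": 1500,
//     "input_cost_usd": 0.003,
//     "output_cost_usd": 0.0075,
//     "total_cost_usd": 0.0105,
//     "total_cost_jpy": 1.575,
//     "record_count": 1
//   }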
/// Handle daily command
async fn handle_daily(days: u32, claude_dir: Option<PathBuf>) -> Result<()> {
let analyzer = TokenAnalyzer::new();
// Find Claude data directory
let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir())
.ok_or_else(|| anyhow!("Claude Code data directory not found"))?;
println!("Loading data from: {}", data_dir.display());
// Parse records
let records = analyzer.parse_jsonl_files(&data_dir)?;
if records.is_empty() {
println!("No token usage data found");
return Ok(());
}
// Generate daily breakdown
let breakdown = analyzer.daily_breakdown(&records)?;
let limited_breakdown: Vec<_> = breakdown.into_iter().take(days as usize).collect();
// Print daily breakdown
print_daily_breakdown(&limited_breakdown);
Ok(())
}
/// Handle status command
async fn handle_status(claude_dir: Option<PathBuf>) -> Result<()> {
let analyzer = TokenAnalyzer::new();
// Find Claude data directory
let data_dir = claude_dir.or_else(|| TokenAnalyzer::find_claude_data_dir());
match data_dir {
Some(dir) => {
println!("Claude Code data directory: {}", dir.display());
// Parse records to get basic stats
let records = analyzer.parse_jsonl_files(&dir)?;
let summary = analyzer.calculate_costs(&records);
println!("Total records: {}", summary.record_count);
println!("Total tokens: {}", summary.total_tokens);
println!("Estimated total cost: ${:.4} USD (¥{:.0} JPY)",
summary.total_cost_usd, summary.total_cost_jpy);
}
None => {
println!("Claude Code data directory not found");
println!("Checked locations:");
println!(" - ~/.claude");
println!(" - ~/.config/claude");
println!(" - ./.claude");
}
}
Ok(())
}
/// Print summary table
fn print_summary_table(summary: &CostSummary, period: &str, details: bool) {
println!("\n=== Claude Code Token Usage Summary ({}) ===", period);
println!();
println!("📊 Token Usage:");
println!(" Input tokens: {:>12}", format_number(summary.input_tokens));
println!(" Output tokens: {:>12}", format_number(summary.output_tokens));
println!(" Total tokens: {:>12}", format_number(summary.total_tokens));
println!();
println!("💰 Cost Estimation:");
println!(" Input cost: {:>12}", format!("${:.4} USD", summary.input_cost_usd));
println!(" Output cost: {:>12}", format!("${:.4} USD", summary.output_cost_usd));
println!(" Total cost: {:>12}", format!("${:.4} USD", summary.total_cost_usd));
println!(" Total cost: {:>12}", format!("¥{:.0} JPY", summary.total_cost_jpy));
println!();
if details {
println!("📈 Additional Details:");
println!(" Records: {:>12}", format_number(summary.record_count as u64));
println!(" Avg per record:{:>12}", format!("${:.4} USD",
if summary.record_count > 0 { summary.total_cost_usd / summary.record_count as f64 } else { 0.0 }));
println!();
}
println!("💡 Cost calculation based on:");
println!(" Input: $3.00 per 1M tokens");
println!(" Output: $15.00 per 1M tokens");
println!(" USD to JPY: 150.0");
}
/// Print daily breakdown
fn print_daily_breakdown(breakdown: &[DailyBreakdown]) {
println!("\n=== Daily Token Usage Breakdown ===");
println!();
for daily in breakdown {
println!("📅 {} (Records: {})", daily.date, daily.summary.record_count);
println!(" Tokens: {} input + {} output = {} total",
format_number(daily.summary.input_tokens),
format_number(daily.summary.output_tokens),
format_number(daily.summary.total_tokens));
println!(" Cost: ${:.4} USD (¥{:.0} JPY)",
daily.summary.total_cost_usd,
daily.summary.total_cost_jpy);
println!();
}
}
/// Format large numbers with commas
fn format_number(n: u64) -> String {
let s = n.to_string();
let mut result = String::new();
for (i, c) in s.chars().rev().enumerate() {
if i > 0 && i % 3 == 0 {
result.push(',');
}
result.push(c);
}
result.chars().rev().collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_cost_calculation() {
let analyzer = TokenAnalyzer::new();
let records = vec![
TokenRecord {
timestamp: "2024-01-01T10:00:00Z".to_string(),
usage: Some(TokenUsage {
input_tokens: Some(1000),
output_tokens: Some(500),
total_tokens: Some(1500),
}),
model: Some("claude-3".to_string()),
conversation_id: Some("test".to_string()),
},
];
let summary = analyzer.calculate_costs(&records);
assert_eq!(summary.input_tokens, 1000);
assert_eq!(summary.output_tokens, 500);
assert_eq!(summary.total_tokens, 1500);
assert_eq!(summary.record_count, 1);
}
#[test]
fn test_date_extraction() {
let analyzer = TokenAnalyzer::new();
let result = analyzer.extract_date_jst("2024-01-01T10:00:00Z");
assert!(result.is_ok());
// Note: The exact date depends on JST conversion
}
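#[test]
fn test_format_number() {
// Sanity check for the comma-grouping helper defined above
assert_eq!(format_number(0), "0");
assert_eq!(format_number(999), "999");
assert_eq!(format_number(1234567), "1,234,567");
}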
}

423
src/transmission.rs Normal file
View File

@@ -0,0 +1,423 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use anyhow::{Result, Context};
use chrono::{DateTime, Utc};
use crate::config::Config;
use crate::persona::Persona;
use crate::relationship::{Relationship, RelationshipStatus};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionLog {
pub user_id: String,
pub message: String,
pub timestamp: DateTime<Utc>,
pub transmission_type: TransmissionType,
pub success: bool,
pub error: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TransmissionType {
Autonomous, // AI decided to send
Scheduled, // Time-based trigger
Breakthrough, // Fortune breakthrough triggered
Maintenance, // Daily maintenance message
}
impl std::fmt::Display for TransmissionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TransmissionType::Autonomous => write!(f, "autonomous"),
TransmissionType::Scheduled => write!(f, "scheduled"),
TransmissionType::Breakthrough => write!(f, "breakthrough"),
TransmissionType::Maintenance => write!(f, "maintenance"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransmissionController {
config: Config,
transmission_history: Vec<TransmissionLog>,
last_check: Option<DateTime<Utc>>,
}
impl TransmissionController {
pub fn new(config: Config) -> Result<Self> {
let transmission_history = Self::load_transmission_history(&config)?;
Ok(TransmissionController {
config,
transmission_history,
last_check: None,
})
}
pub async fn check_autonomous_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let now = Utc::now();
// Get all transmission-eligible relationships
let eligible_user_ids: Vec<String> = {
let relationships = persona.list_all_relationships();
relationships.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| rel.score >= rel.threshold)
.map(|(id, _)| id.clone())
.collect()
};
for user_id in eligible_user_ids {
// Get fresh relationship data for each check
if let Some(relationship) = persona.get_relationship(&user_id) {
// Check if enough time has passed since last transmission
if let Some(last_transmission) = relationship.last_transmission {
let hours_since_last = (now - last_transmission).num_hours();
if hours_since_last < 24 {
continue; // Skip if transmitted in last 24 hours
}
}
// Check if conditions are met for autonomous transmission
if self.should_transmit_to_user(&user_id, relationship, persona)? {
let transmission = self.generate_autonomous_transmission(persona, &user_id).await?;
transmissions.push(transmission);
}
}
}
self.last_check = Some(now);
self.save_transmission_history()?;
Ok(transmissions)
}
pub async fn check_breakthrough_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let state = persona.get_current_state()?;
// Only trigger breakthrough transmissions if fortune is very high
if !state.breakthrough_triggered || state.fortune_value < 9 {
return Ok(transmissions);
}
// Get close relationships for breakthrough sharing
let relationships = persona.list_all_relationships();
let close_friends: Vec<_> = relationships.iter()
.filter(|(_, rel)| matches!(rel.status, RelationshipStatus::Friend | RelationshipStatus::CloseFriend))
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.collect();
for (user_id, _relationship) in close_friends {
// Check if we haven't sent a breakthrough message today
let today = chrono::Utc::now().date_naive();
let already_sent_today = self.transmission_history.iter()
.any(|log| {
log.user_id == *user_id &&
matches!(log.transmission_type, TransmissionType::Breakthrough) &&
log.timestamp.date_naive() == today
});
if !already_sent_today {
let transmission = self.generate_breakthrough_transmission(persona, user_id).await?;
transmissions.push(transmission);
}
}
Ok(transmissions)
}
pub async fn check_maintenance_transmissions(&mut self, persona: &mut Persona) -> Result<Vec<TransmissionLog>> {
let mut transmissions = Vec::new();
let now = Utc::now();
// Only send maintenance messages once per day
let today = now.date_naive();
let already_sent_today = self.transmission_history.iter()
.any(|log| {
matches!(log.transmission_type, TransmissionType::Maintenance) &&
log.timestamp.date_naive() == today
});
if already_sent_today {
return Ok(transmissions);
}
// Apply daily maintenance to persona
persona.daily_maintenance()?;
// Get relationships that might need a maintenance check-in
let relationships = persona.list_all_relationships();
let maintenance_candidates: Vec<_> = relationships.iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| {
// Send maintenance to relationships that haven't been contacted in a while
if let Some(last_interaction) = rel.last_interaction {
let days_since = (now - last_interaction).num_days();
days_since >= 7 // Haven't talked in a week
} else {
false
}
})
.take(3) // Limit to 3 maintenance messages per day
.collect();
for (user_id, _) in maintenance_candidates {
let transmission = self.generate_maintenance_transmission(persona, user_id).await?;
transmissions.push(transmission);
}
Ok(transmissions)
}
fn should_transmit_to_user(&self, user_id: &str, relationship: &Relationship, persona: &Persona) -> Result<bool> {
// Basic transmission criteria
if !relationship.transmission_enabled || relationship.is_broken {
return Ok(false);
}
// Score must be above threshold
if relationship.score < relationship.threshold {
return Ok(false);
}
// Check transmission cooldown
if let Some(last_transmission) = relationship.last_transmission {
let hours_since = (Utc::now() - last_transmission).num_hours();
if hours_since < 24 {
return Ok(false);
}
}
// Calculate transmission probability based on relationship strength
let base_probability = match relationship.status {
RelationshipStatus::New => 0.1,
RelationshipStatus::Acquaintance => 0.2,
RelationshipStatus::Friend => 0.4,
RelationshipStatus::CloseFriend => 0.6,
RelationshipStatus::Broken => 0.0,
};
// Modify probability based on fortune
let state = persona.get_current_state()?;
let fortune_modifier = (state.fortune_value as f64 - 5.0) / 10.0; // -0.4 to +0.5
let final_probability = (base_probability + fortune_modifier).max(0.0).min(1.0);
// Simple random check (in real implementation, this would be more sophisticated)
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
user_id.hash(&mut hasher);
Utc::now().timestamp().hash(&mut hasher);
let hash = hasher.finish();
let random_value = (hash % 100) as f64 / 100.0;
Ok(random_value < final_probability)
}
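// Worked example: a Friend relationship (base 0.4) while today's fortune
// is 8 yields 0.4 + (8 - 5) / 10 = 0.7, i.e. roughly a 70% chance per
// eligible check; a New relationship (base 0.1) with fortune 1 clamps to 0.0.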
async fn generate_autonomous_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
// Get recent memories for context
let memories = persona.get_memories(user_id, 3);
let context = if !memories.is_empty() {
format!("Based on our recent conversations: {}", memories.join(", "))
} else {
"Starting a spontaneous conversation".to_string()
};
// Generate message using AI if available
let message = match self.generate_ai_message(persona, user_id, &context, TransmissionType::Autonomous).await {
Ok(msg) => msg,
Err(_) => {
// Fallback to simple messages
let fallback_messages = [
"Hey! How have you been?",
"Just thinking about our last conversation...",
"Hope you're having a good day!",
"Something interesting happened today and it reminded me of you.",
];
let index = (now.timestamp() as usize) % fallback_messages.len();
fallback_messages[index].to_string()
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Autonomous,
success: true, // For now, assume success
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_breakthrough_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
let state = persona.get_current_state()?;
let message = match self.generate_ai_message(persona, user_id, "Breakthrough moment - feeling inspired!", TransmissionType::Breakthrough).await {
Ok(msg) => msg,
Err(_) => {
format!("Amazing day today! ⚡ Fortune is at {}/10 and I'm feeling incredibly inspired. Had to share this energy with you!", state.fortune_value)
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Breakthrough,
success: true,
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_maintenance_transmission(&mut self, persona: &mut Persona, user_id: &str) -> Result<TransmissionLog> {
let now = Utc::now();
let message = match self.generate_ai_message(persona, user_id, "Maintenance check-in", TransmissionType::Maintenance).await {
Ok(msg) => msg,
Err(_) => {
"Hey! It's been a while since we last talked. Just checking in to see how you're doing!".to_string()
}
};
let log = TransmissionLog {
user_id: user_id.to_string(),
message,
timestamp: now,
transmission_type: TransmissionType::Maintenance,
success: true,
error: None,
};
self.transmission_history.push(log.clone());
Ok(log)
}
async fn generate_ai_message(&self, _persona: &mut Persona, _user_id: &str, context: &str, transmission_type: TransmissionType) -> Result<String> {
// Try to use AI for message generation
let _system_prompt = format!(
"You are initiating a {} conversation. Context: {}. Keep the message casual, personal, and under 100 characters. Show genuine interest in the person.",
transmission_type, context
);
// This is a simplified version - in a real implementation, we'd use the AI provider
// For now, return an error to trigger fallback
Err(anyhow::anyhow!("AI provider not available for transmission generation"))
}
fn get_eligible_relationships(&self, persona: &Persona) -> Vec<String> {
persona.list_all_relationships().iter()
.filter(|(_, rel)| rel.transmission_enabled && !rel.is_broken)
.filter(|(_, rel)| rel.score >= rel.threshold)
.map(|(id, _)| id.clone())
.collect()
}
pub fn get_transmission_stats(&self) -> TransmissionStats {
let total_transmissions = self.transmission_history.len();
let successful_transmissions = self.transmission_history.iter()
.filter(|log| log.success)
.count();
let today = Utc::now().date_naive();
let today_transmissions = self.transmission_history.iter()
.filter(|log| log.timestamp.date_naive() == today)
.count();
let by_type = {
let mut counts = HashMap::new();
for log in &self.transmission_history {
*counts.entry(log.transmission_type.to_string()).or_insert(0) += 1;
}
counts
};
TransmissionStats {
total_transmissions,
successful_transmissions,
today_transmissions,
success_rate: if total_transmissions > 0 {
successful_transmissions as f64 / total_transmissions as f64
} else {
0.0
},
by_type,
}
}
pub fn get_recent_transmissions(&self, limit: usize) -> Vec<&TransmissionLog> {
let mut logs: Vec<_> = self.transmission_history.iter().collect();
logs.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
logs.into_iter().take(limit).collect()
}
fn load_transmission_history(config: &Config) -> Result<Vec<TransmissionLog>> {
let file_path = config.transmission_file();
if !file_path.exists() {
return Ok(Vec::new());
}
let content = std::fs::read_to_string(file_path)
.context("Failed to read transmission history file")?;
let history: Vec<TransmissionLog> = serde_json::from_str(&content)
.context("Failed to parse transmission history file")?;
Ok(history)
}
fn save_transmission_history(&self) -> Result<()> {
let content = serde_json::to_string_pretty(&self.transmission_history)
.context("Failed to serialize transmission history")?;
std::fs::write(&self.config.transmission_file(), content)
.context("Failed to write transmission history file")?;
Ok(())
}
pub async fn check_and_send(&mut self) -> Result<Vec<(String, String)>> {
let config = self.config.clone();
let mut persona = Persona::new(&config)?;
let mut results = Vec::new();
// Check autonomous transmissions
let autonomous = self.check_autonomous_transmissions(&mut persona).await?;
for log in autonomous {
if log.success {
results.push((log.user_id, log.message));
}
}
// Check breakthrough transmissions
let breakthrough = self.check_breakthrough_transmissions(&mut persona).await?;
for log in breakthrough {
if log.success {
results.push((log.user_id, log.message));
}
}
Ok(results)
}
}
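// Minimal usage sketch for a caller (error handling elided):
//
//   let config = Config::new(None)?;
//   let mut controller = TransmissionController::new(config)?;
//   let sent = controller.check_and_send().await?;  // Vec<(user_id, message)>
//   for (user_id, message) in sent {
//       println!("→ {}: {}", user_id, message);
//   }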
#[derive(Debug, Clone)]
pub struct TransmissionStats {
pub total_transmissions: usize,
pub successful_transmissions: usize,
pub today_transmissions: usize,
pub success_rate: f64,
pub by_type: HashMap<String, usize>,
}