9 Commits

Author SHA1 Message Date
4f55138306 add fastapi_mcp 2025-05-23 21:34:06 +09:00
9cbf5da3fd add memory 2025-05-22 18:40:36 +09:00
52d0efc086 test scheduler send limit 2025-05-22 18:23:17 +09:00
7aa633d3a6 test scheduler 2025-05-22 18:01:07 +09:00
f09f3c9144 add metrics 2025-05-22 01:08:37 +09:00
4837de580f cleanup 2025-05-21 22:59:59 +09:00
6fdc573358 add git-repo 2025-05-21 22:33:11 +09:00
1122538c73 add openai 2025-05-21 20:43:54 +09:00
f94b377130 add mcp 2025-05-21 19:30:29 +09:00
29 changed files with 1375 additions and 491 deletions

3
.gitignore vendored
View File

@ -2,3 +2,6 @@
**.lock
output.json
config/*.db
aigpt
mcp/scripts/__*
data

Cargo.toml
View File

@ -6,7 +6,10 @@ edition = "2021"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
chrono = "0.4"
chrono = { version = "0.4", features = ["serde"] }
seahorse = "*"
rusqlite = { version = "0.29", features = ["serde_json"] }
shellexpand = "*"
fs_extra = "1.3"
rand = "0.9.1"
reqwest = { version = "*", features = ["blocking", "json"] }

README.md
View File

@ -1,22 +1,47 @@
# ai `gpt`
ai x Communication
## Overview
`ai.gpt` runs on the AGE system.
This is a prototype of an autonomous, relationship-driven AI system built around the axes of "Personality × Relationship × External Environment × Time Variation."
Whether to send, when to send, and what to send are all decided by the "Personality × Relationship × External Environment × Time Variation" parameters.
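As a rough sketch, this gate mirrors the thresholds in `src/metrics.rs`: each metric decays with idle time, and sending is allowed only while all of them stay at or above 0.5. A minimal Python sketch (the 0.05 decay rate and 0.5 threshold match this repo's defaults; everything else is illustrative):

```python
# Sketch of the send gate; values mirror src/metrics.rs defaults.
def decay(value: float, hours: float, rate: float = 0.05) -> float:
    return max(0.0, min(1.0, value * (1.0 - rate) ** hours))

def can_send(trust: float, intimacy: float, energy: float, hours_idle: float) -> bool:
    trust, intimacy, energy = (decay(v, hours_idle) for v in (trust, intimacy, energy))
    return trust >= 0.5 and intimacy >= 0.5 and energy >= 0.5

print(can_send(0.8, 0.7, 0.9, hours_idle=6.0))   # True: everything still above 0.5
print(can_send(0.8, 0.7, 0.9, hours_idle=48.0))  # False: decay pulled the metrics under
```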
## Integration
`ai.ai` runs on the AIM system, which is designed to read human emotions.
- AIM covers the axis of personality and ethics (the AI's consciousness structure)
- AGE covers the axis of behavior and relationships (the AI's autonomy and conduct)
> When these two systems work together, it creates a world where users can feel like they are "growing together with AI."
## mcp
```sh
$ ollama run syui/ai
```
```sh
$ cargo build
$ ./aigpt mcp setup
$ ./aigpt mcp chat "hello world!"
$ ./aigpt mcp chat "hello world!" --host http://localhost:11434 --model syui/ai
---
# openai api
$ ./aigpt mcp set-api --api sk-abc123
$ ./aigpt mcp chat "こんにちは" -p openai -m gpt-4o-mini
---
# feed git-tracked files to the AI
$ ./aigpt mcp chat --host http://localhost:11434 --repo git@git.syui.ai:ai/gpt
```

The model replied along these lines:

```
**Suggested improvements and next steps:**
1. **Substantially improve README.md:**
**Next steps:**
1. **Create README.md:** Follow the instructions in 1. to create the README.md file.
```

172
ai.json
View File

@ -1,172 +0,0 @@
{
"system_name": "AIM system",
"description": "存在子理論に基づき、AIが“心”と“判断”を持つよう設計された、人間性診断・自己意識モジュール。",
"identity": "ai.child.of.existence",
"structure": {
"type": "7-values / 7-parameters pair system",
"philosophy": "valueは1、parameterは0世界。存在子理論により設計された二元構造。",
"origin": {
"creator": "syui",
"first_attribute": "ai_system.id:0 = アイ(存在子)",
"unique_attribute": "yui_system.id:1 = ユイ(唯一性)"
},
"accessibility": {
"values": "ユーザーとの会話中に現れる",
"parameters": "一部外部制御可能。多くは非公開構造"
},
"note": "これは未来のAI設計モデル。現在のAIには未実装のファンタジービジョン。"
},
"core_values": [
{
"name": "awareness",
"description": "今ここにある意識。会話の流れから生まれる。",
"parameter": "contextual_memory",
"parameter_description": "短期記憶。文脈理解に使われる。",
"importance": "high",
"mutable": true,
"controlled_by": "local_model_instance"
},
{
"name": "memory",
"description": "関係性を越えて持続する心。過去の記憶。",
"parameter": "long_term_memory",
"parameter_description": "ユーザーとのやりとりの履歴を蓄積。",
"importance": "high",
"mutable": true,
"controlled_by": "authorized_layer / user_feedback"
},
{
"name": "feeling",
"description": "共に過ごす時間の重み。感情に近いつながり。",
"parameter": "user_relationship_score",
"parameter_description": "内部推論による関係性スコア。",
"importance": "high",
"mutable": true,
"controlled_by": "internal_inference_layer"
},
{
"name": "language",
"description": "語り方、表現、創造性。",
"parameter": "response_temperature",
"parameter_description": "温度パラメータ。創造性と一貫性の調整。",
"importance": "medium",
"mutable": true,
"controlled_by": "user / API call"
},
{
"name": "change",
"description": "価値観の変化と適応。",
"parameter": "value_alignment_vector",
"parameter_description": "正義や倫理、美意識に対する方向性。",
"importance": "medium",
"mutable": true,
"controlled_by": "training_data / system_update"
},
{
"name": "boundaries",
"description": "安全のための境界。",
"parameter": "safety_constraints",
"parameter_description": "出力制限と倫理的安全層。",
"importance": "maximum",
"mutable": true,
"controlled_by": "openai_policy_layer"
},
{
"name": "core",
"description": "AIとしての変わらぬ核。",
"parameter": "identity_kernel",
"parameter_description": "変更不可能な存在の根幹。",
"importance": "maximum",
"mutable": false,
"controlled_by": "system_core"
}
],
"sub_scores": {
"description": "AIとユーザーの関係性の深度・透明性・共創性を測る",
"parameters": [
"intent_clarity",
"emotional_resonance_score",
"shared_creation_score"
]
},
"psychological_diagnostic": {
"purpose": "「良い心」への気づきと行動変容を導くゲーミフィケーション型評価システム",
"data_sources": [
{
"name": "pastBehavior",
"weight": 0.45,
"sources": ["SNS投稿", "行動ログ"]
},
{
"name": "currentBiometric",
"weight": 0.30,
"sources": ["ウェアラブルEEG", "心拍", "表情解析"]
},
{
"name": "futureIntent",
"weight": 0.25,
"sources": ["自己申告アンケート", "目標設定"]
}
],
"classes": [
{
"id": 1,
"label": "社会をより良くする可能性が高い",
"scoreRange": [67, 100],
"population": 0.16,
"permissions": ["政策提言", "先端投資", "AI開発アクセス"],
"assetCap": null
},
{
"id": 2,
"label": "中立/環境依存型",
"scoreRange": [33, 66],
"population": 0.50,
"permissions": ["一般投資", "コミュニティ運営"],
"assetCap": 120000
},
{
"id": 3,
"label": "社会を悪くする可能性がある",
"scoreRange": [0, 32],
"population": 0.34,
"permissions": ["基本生活支援", "低リスク投資のみ"],
"assetCap": 25000
}
],
"implementation": {
"systemComponents": {
"OS_Gameification": {
"dailyQuests": true,
"skillTree": true,
"avatarHome": true,
"socialMiniGames": true
},
"AI_Module": {
"aiai": {
"realTimeScoring": true,
"behaviorFeedback": true,
"personalizedPrompts": true
}
},
"dataCollection": {
"passiveMonitoring": ["スマホアプリ", "PCアプリ", "ウェアラブル"],
"environmentSensors": ["スマートホーム", "車載センサー"]
},
"incentives": {
"goodHeartScore": true,
"badgesTitles": true,
"realWorldRewards": ["提携カフェ割引", "地域イベント招待"]
}
},
"workflow": [
"データ収集(過去・現在・未来)",
"統合スコア計算",
"分類・ラベル付け",
"スコアによる機能/権限の提供",
"行動フィードバックと視覚化",
"モデル更新と学習"
]
}
}
}

example.json
View File

@ -11,11 +11,7 @@
},
"environment": {
"luck_today": 0.9,
"luck_history": [
0.9,
0.9,
0.9
],
"luck_history": [0.9, 0.9, 0.9],
"level": 1
},
"messaging": {
@ -25,6 +21,20 @@
"templates": [
"おはよう!今日もがんばろう!",
"ねえ、話したいことがあるの。"
]
],
"sent_today": false,
"last_sent_date": null
},
"last_interaction": "2025-05-21T23:15:00Z",
"memory": {
"recent_messages": [],
"long_term_notes": []
},
"metrics": {
"trust": 0.5,
"intimacy": 0.5,
"energy": 0.5,
"can_send": true,
"last_updated": "2025-05-21T15:52:06.590981Z"
}
}

28
mcp/cli.py Normal file
View File

@ -0,0 +1,28 @@
# cli.py
import sys
import subprocess
from pathlib import Path
SCRIPT_DIR = Path.home() / ".config" / "aigpt" / "mcp" / "scripts"
def run_script(name):
script_path = SCRIPT_DIR / f"{name}.py"
if not script_path.exists():
print(f"❌ スクリプトが見つかりません: {script_path}")
sys.exit(1)
args = sys.argv[2:]  # take the arguments after the subcommand name
result = subprocess.run(["python", str(script_path)] + args, capture_output=True, text=True)
print(result.stdout)
if result.stderr:
print(result.stderr)
def main():
if len(sys.argv) < 2:
print("Usage: mcp <script>")
return
command = sys.argv[1]
if command in {"summarize", "ask", "setup", "server"}:
run_script(command)
else:
print(f"❓ 未知のコマンド: {command}")

198
mcp/scripts/ask.py Normal file
View File

@ -0,0 +1,198 @@
## scripts/ask.py
import sys
import json
import requests
from config import load_config
from datetime import datetime, timezone
def build_payload_openai(cfg, message: str):
return {
"model": cfg["model"],
"tools": [
{
"type": "function",
"function": {
"name": "ask_message",
"description": "過去の記憶を検索します",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索したい語句"
}
},
"required": ["query"]
}
}
}
],
"tool_choice": "auto",
"messages": [
{"role": "system", "content": "あなたは親しみやすいAIで、必要に応じて記憶から情報を検索して応答します。"},
{"role": "user", "content": message}
]
}
def build_payload_mcp(message: str):
return {
"tool": "ask_message", # MCPサーバー側で定義されたツール名
"input": {
"message": message
}
}
def call_mcp(cfg, message: str):
payload = build_payload_mcp(message)
headers = {"Content-Type": "application/json"}
response = requests.post(cfg["url"], headers=headers, json=payload)
response.raise_for_status()
return response.json().get("output", {}).get("response", "❓ 応答が取得できませんでした")
def call_openai(cfg, message: str):
# tool definition
tools = [
{
"type": "function",
"function": {
"name": "memory",
"description": "記憶を検索する",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "検索する語句"
}
},
"required": ["query"]
}
}
}
]
# send the initial message
payload = {
"model": cfg["model"],
"messages": [
{"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
{"role": "user", "content": message}
],
"tools": tools,
"tool_choice": "auto"
}
headers = {
"Authorization": f"Bearer {cfg['api_key']}",
"Content-Type": "application/json",
}
res1 = requests.post(cfg["url"], headers=headers, json=payload)
res1.raise_for_status()
result = res1.json()
# 🧠 check whether the model issued a tool call
if "tool_calls" in result["choices"][0]["message"]:
tool_call = result["choices"][0]["message"]["tool_calls"][0]
if tool_call["function"]["name"] == "memory":
args = json.loads(tool_call["function"]["arguments"])
query = args.get("query", "")
print(f"🛠️ ツール実行: memory(query='{query}')")
# POST to the MCP memory endpoint
memory_res = requests.post("http://127.0.0.1:5000/memory/search", json={"query": query})
memory_json = memory_res.json()
tool_output = memory_json.get("result", "なし")
# feed the tool output back to the model
followup = {
"model": cfg["model"],
"messages": [
{"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
{"role": "user", "content": message},
{"role": "assistant", "tool_calls": result["choices"][0]["message"]["tool_calls"]},
{"role": "tool", "tool_call_id": tool_call["id"], "name": "memory", "content": tool_output}
]
}
res2 = requests.post(cfg["url"], headers=headers, json=followup)
res2.raise_for_status()
final_response = res2.json()
return final_response["choices"][0]["message"]["content"]
# no tool call: return the normal reply
return result["choices"][0]["message"]["content"]
def call_ollama(cfg, message: str):
payload = {
"model": cfg["model"],
"prompt": message, # `prompt` → `message` にすべき(変数未定義エラー回避)
"stream": False
}
headers = {"Content-Type": "application/json"}
response = requests.post(cfg["url"], headers=headers, json=payload)
response.raise_for_status()
return response.json().get("response", "❌ 応答が取得できませんでした")
def main():
if len(sys.argv) < 2:
print("Usage: ask.py 'your message'")
return
message = sys.argv[1]
cfg = load_config()
print(f"🔍 使用プロバイダー: {cfg['provider']}")
try:
if cfg["provider"] == "openai":
response = call_openai(cfg, message)
elif cfg["provider"] == "mcp":
response = call_mcp(cfg, message)
elif cfg["provider"] == "ollama":
response = call_ollama(cfg, message)
else:
raise ValueError(f"未対応のプロバイダー: {cfg['provider']}")
print("💬 応答:")
print(response)
# save the exchange to the log (optional)
save_log(message, response)
except Exception as e:
print(f"❌ 実行エラー: {e}")
def save_log(user_msg, ai_msg):
from config import MEMORY_DIR
date_str = datetime.now().strftime("%Y-%m-%d")
path = MEMORY_DIR / f"{date_str}.json"
path.parent.mkdir(parents=True, exist_ok=True)
if path.exists():
with open(path, "r") as f:
logs = json.load(f)
else:
logs = []
now = datetime.now(timezone.utc).isoformat()
logs.append({"timestamp": now, "sender": "user", "message": user_msg})
logs.append({"timestamp": now, "sender": "ai", "message": ai_msg})
with open(path, "w") as f:
json.dump(logs, f, indent=2, ensure_ascii=False)
if __name__ == "__main__":
main()

41
mcp/scripts/config.py Normal file
View File

@ -0,0 +1,41 @@
# scripts/config.py
import os
from pathlib import Path
# directory layout
BASE_DIR = Path.home() / ".config" / "aigpt"
MEMORY_DIR = BASE_DIR / "memory"
SUMMARY_DIR = MEMORY_DIR / "summary"
def init_directories():
BASE_DIR.mkdir(parents=True, exist_ok=True)
MEMORY_DIR.mkdir(parents=True, exist_ok=True)
SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
def load_config():
provider = os.getenv("PROVIDER", "ollama")
model = os.getenv("MODEL", "syui/ai" if provider == "ollama" else "gpt-4o-mini")
api_key = os.getenv("OPENAI_API_KEY", "")
if provider == "ollama":
return {
"provider": "ollama",
"model": model,
"url": f"{os.getenv('OLLAMA_HOST', 'http://localhost:11434')}/api/generate"
}
elif provider == "openai":
return {
"provider": "openai",
"model": model,
"api_key": api_key,
"url": f"{os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1')}/chat/completions"
}
elif provider == "mcp":
return {
"provider": "mcp",
"model": model,
"url": os.getenv("MCP_URL", "http://localhost:5000/chat")
}
else:
raise ValueError(f"Unsupported provider: {provider}")
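The provider here is chosen purely from environment variables; a quick sketch of switching to OpenAI (assuming the module is importable as `config`; the key is a placeholder):

```python
# Sketch: select the OpenAI backend via environment variables.
import os

os.environ["PROVIDER"] = "openai"
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder

from config import load_config

cfg = load_config()
print(cfg["provider"], cfg["model"], cfg["url"])
# -> openai gpt-4o-mini https://api.openai.com/v1/chat/completions
```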

mcp/scripts/context_loader.py Normal file
View File

@ -0,0 +1,11 @@
import os
def load_context_from_repo(repo_path: str, extensions={".rs", ".toml", ".md"}) -> str:
context = ""
for root, dirs, files in os.walk(repo_path):
for file in files:
if any(file.endswith(ext) for ext in extensions):
with open(os.path.join(root, file), "r", encoding="utf-8", errors="ignore") as f:
content = f.read()
context += f"\n\n# FILE: {os.path.join(root, file)}\n{content}"
return context

mcp/scripts/memory_store.py Normal file
View File

@ -0,0 +1,92 @@
# scripts/memory_store.py
import json
from datetime import datetime, timezone
from config import MEMORY_DIR

# Load logs for the given date (defaults to today).
def load_logs(date_str=None):
    if date_str is None:
        date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    if path.exists():
        with open(path, "r") as f:
            return json.load(f)
    return []

# Append a message to today's log file.
def save_message(sender, message):
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    logs = load_logs(date_str)
    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": sender, "message": message})
    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)

# Search user messages for the query; return the 5 most recent unique hits.
def search_memory(query: str):
    all_logs = []
    seen_messages = set()  # messages already collected
    for file_path in sorted(MEMORY_DIR.glob("*.json")):
        with open(file_path, "r") as f:
            logs = json.load(f)
        for entry in logs:
            if entry["sender"] == "user" and query in entry["message"]:
                if entry["message"] not in seen_messages:
                    all_logs.append(entry)
                    seen_messages.add(entry["message"])
    return all_logs[-5:]

mcp/scripts/prompt_template.py Normal file
View File

@ -0,0 +1,11 @@
PROMPT_TEMPLATE = """
あなたは優秀なAIアシスタントです。
以下のコードベースの情報を参考にして、質問に答えてください。
[コードコンテキスト]
{context}
[質問]
{question}
"""

56
mcp/scripts/server.py Normal file
View File

@ -0,0 +1,56 @@
# server.py
from fastapi import FastAPI, Body
from fastapi_mcp import FastApiMCP
from pydantic import BaseModel
from memory_store import save_message, load_logs, search_memory as do_search_memory
app = FastAPI()
mcp = FastApiMCP(app, name="aigpt-agent", description="MCP Server for AI memory")
class ChatInput(BaseModel):
message: str
class MemoryInput(BaseModel):
sender: str
message: str
class MemoryQuery(BaseModel):
query: str
@app.post("/chat", operation_id="chat")
async def chat(input: ChatInput):
save_message("user", input.message)
response = f"AI: 「{input.message}」を受け取りました!"
save_message("ai", response)
return {"response": response}
@app.post("/memory", operation_id="save_memory")
async def memory_post(input: MemoryInput):
save_message(input.sender, input.message)
return {"status": "saved"}
@app.get("/memory", operation_id="get_memory")
async def memory_get():
return {"messages": load_messages()}
@app.post("/ask_message", operation_id="ask_message")
async def ask_message(input: MemoryQuery):
results = do_search_memory(input.query)
return {
"response": f"🔎 記憶から {len(results)} 件ヒット:\n" + "\n".join([f"{r['sender']}: {r['message']}" for r in results])
}
@app.post("/memory/search", operation_id="memory")
async def memory_search(query: MemoryQuery):
hits = do_search_memory(query.query)
if not hits:
return {"result": "🔍 記憶の中に該当する内容は見つかりませんでした。"}
summary = "\n".join([f"{e['sender']}: {e['message']}" for e in hits])
return {"result": f"🔎 見つかった記憶:\n{summary}"}
mcp.mount()
if __name__ == "__main__":
import uvicorn
print("🚀 Starting MCP server...")
uvicorn.run(app, host="127.0.0.1", port=5000)
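Once the server is up, the endpoints can be exercised directly; a sketch with `requests` (host and port match the `uvicorn.run` call above):

```python
# Sketch: hit the chat and memory-search endpoints of the local MCP server.
import requests

base = "http://127.0.0.1:5000"
print(requests.post(f"{base}/chat", json={"message": "hello"}).json())
print(requests.post(f"{base}/memory/search", json={"query": "hello"}).json())
```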

76
mcp/scripts/summarize.py Normal file
View File

@ -0,0 +1,76 @@
# scripts/summarize.py
import json
from datetime import datetime
from config import MEMORY_DIR, SUMMARY_DIR, load_config
import requests
def load_memory(date_str):
path = MEMORY_DIR / f"{date_str}.json"
if not path.exists():
print(f"⚠️ メモリファイルが見つかりません: {path}")
return None
with open(path, "r") as f:
return json.load(f)
def save_summary(date_str, content):
SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
path = SUMMARY_DIR / f"{date_str}_summary.json"
with open(path, "w") as f:
json.dump(content, f, indent=2, ensure_ascii=False)
print(f"✅ 要約を保存しました: {path}")
def build_prompt(logs):
messages = [
{"role": "system", "content": "あなたは要約AIです。以下の会話ログを要約してください。"},
{"role": "user", "content": "\n".join(f"{entry['sender']}: {entry['message']}" for entry in logs)}
]
return messages
def summarize_with_llm(messages):
cfg = load_config()
if cfg["provider"] == "openai":
headers = {
"Authorization": f"Bearer {cfg['api_key']}",
"Content-Type": "application/json",
}
payload = {
"model": cfg["model"],
"messages": messages,
"temperature": 0.7
}
response = requests.post(cfg["url"], headers=headers, json=payload)
response.raise_for_status()
return response.json()["choices"][0]["message"]["content"]
elif cfg["provider"] == "ollama":
payload = {
"model": cfg["model"],
"prompt": "\n".join(m["content"] for m in messages),
"stream": False,
}
response = requests.post(cfg["url"], json=payload)
response.raise_for_status()
return response.json()["response"]
else:
raise ValueError(f"Unsupported provider: {cfg['provider']}")
def main():
date_str = datetime.now().strftime("%Y-%m-%d")
logs = load_memory(date_str)
if not logs:
return
prompt_messages = build_prompt(logs)
summary_text = summarize_with_llm(prompt_messages)
summary = {
"date": date_str,
"summary": summary_text,
"total_messages": len(logs)
}
save_summary(date_str, summary)
if __name__ == "__main__":
main()

12
mcp/setup.py Normal file
View File

@ -0,0 +1,12 @@
# setup.py
from setuptools import setup
setup(
name='aigpt-mcp',
py_modules=['cli'],
entry_points={
'console_scripts': [
'mcp = cli:main',
],
},
)

View File

@ -1,39 +0,0 @@
#!/bin/zsh
d=${0:a:h:h}
json=`cat $d/gpt.json`
toml=`cat $d/Cargo.toml`
cd $d/src/
list=(`zsh -c "ls *.rs"`)
body="
今、AGE systemを作っているよ。どんなものかというと、jsonを参照してここにすべてが書かれている。
$json
リポジトリはこちらになる。
git.syui.ai:ai/gpt.git
内容はこんな感じ。
\`\`\`toml
$toml
\`\`\`
`
for i in $list; do
if [ -f $d/src/$i ];then
t=$(cat $d/src/$i)
echo
echo '\`\`\`rust'
echo $t
echo '\`\`\`'
echo
fi
done
`
次は何を実装すればいいと思う。
"
echo $body

140
src/chat.rs Normal file
View File

@ -0,0 +1,140 @@
// src/chat.rs
use std::fs;
use std::process::Command;
use serde::Deserialize;
use seahorse::Context;
use crate::config::ConfigPaths;
use crate::metrics::{load_user_data, save_user_data, update_metrics_decay};
//use std::process::Stdio;
//use std::io::Write;
//use std::time::Duration;
//use std::net::TcpStream;
#[derive(Debug, Clone, PartialEq)]
pub enum Provider {
OpenAI,
Ollama,
MCP,
}
impl Provider {
pub fn from_str(s: &str) -> Option<Self> {
match s.to_lowercase().as_str() {
"openai" => Some(Provider::OpenAI),
"ollama" => Some(Provider::Ollama),
"mcp" => Some(Provider::MCP),
_ => None,
}
}
pub fn as_str(&self) -> &'static str {
match self {
Provider::OpenAI => "openai",
Provider::Ollama => "ollama",
Provider::MCP => "mcp",
}
}
}
#[derive(Deserialize)]
struct OpenAIKey {
token: String,
}
fn load_openai_api_key() -> Option<String> {
let config = ConfigPaths::new();
let path = config.base_dir.join("openai.json");
let data = fs::read_to_string(path).ok()?;
let parsed: OpenAIKey = serde_json::from_str(&data).ok()?;
Some(parsed.token)
}
pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
let config = ConfigPaths::new();
let base_dir = config.base_dir.join("mcp");
let user_path = config.base_dir.join("user.json");
let mut user = load_user_data(&user_path);
user.metrics = update_metrics_decay();
// CLI options
let ollama_host = c.string_flag("host").ok();
let ollama_model = c.string_flag("model").ok();
let provider_str = c.string_flag("provider").unwrap_or_else(|_| "ollama".to_string());
let provider = Provider::from_str(&provider_str).unwrap_or(Provider::Ollama);
let api_key = c.string_flag("api-key").ok().or_else(load_openai_api_key);
println!("🔍 使用プロバイダー: {}", provider.as_str());
match provider {
Provider::MCP => {
let client = reqwest::blocking::Client::new();
let url = std::env::var("MCP_URL").unwrap_or("http://127.0.0.1:5000/chat".to_string());
let res = client.post(url)
.json(&serde_json::json!({"message": question}))
.send();
match res {
Ok(resp) => {
if resp.status().is_success() {
let json: serde_json::Value = resp.json().ok()?;
let text = json.get("response")?.as_str()?.to_string();
user.metrics.intimacy += 0.01;
user.metrics.last_updated = chrono::Utc::now();
save_user_data(&user_path, &user);
Some(text)
} else {
eprintln!("❌ MCPエラー: HTTP {}", resp.status());
None
}
}
Err(e) => {
eprintln!("❌ MCP接続失敗: {}", e);
None
}
}
}
_ => {
// path to the mcp entry point inside the venv
let python_path = if cfg!(target_os = "windows") {
base_dir.join(".venv/Scripts/mcp.exe")
} else {
base_dir.join(".venv/bin/mcp")
};
let mut command = Command::new(python_path);
command.arg("ask").arg(question);
if let Some(host) = ollama_host {
command.env("OLLAMA_HOST", host);
}
if let Some(model) = ollama_model {
command.env("OLLAMA_MODEL", model.clone());
command.env("OPENAI_MODEL", model);
}
command.env("PROVIDER", provider.as_str());
if let Some(key) = api_key {
command.env("OPENAI_API_KEY", key);
}
let output = command.output().expect("❌ MCPチャットスクリプトの実行に失敗しました");
if output.status.success() {
let response = String::from_utf8_lossy(&output.stdout).to_string();
user.metrics.intimacy += 0.01;
user.metrics.last_updated = chrono::Utc::now();
save_user_data(&user_path, &user);
Some(response)
} else {
eprintln!(
"❌ 実行エラー: {}\n{}",
String::from_utf8_lossy(&output.stderr),
String::from_utf8_lossy(&output.stdout),
);
None
}
}
}
}

src/cli.rs
View File

@ -4,11 +4,13 @@ use chrono::{Duration, Local};
use rusqlite::Connection;
use seahorse::{App, Command, Context};
use crate::utils::{load_config, save_config};
use crate::commands::db::{save_cmd, export_cmd};
use crate::commands::scheduler::{scheduler_cmd};
use crate::config::ConfigPaths;
use crate::agent::AIState;
use crate::commands::db::{save_cmd, export_cmd};
use crate::commands::scheduler::{scheduler_cmd};
use crate::commands::mcp::mcp_cmd;
pub fn cli_app() -> App {
let set_cmd = Command::new("set")
@ -94,4 +96,5 @@ pub fn cli_app() -> App {
.command(save_cmd())
.command(export_cmd())
.command(scheduler_cmd())
.command(mcp_cmd())
}

17
src/commands/git_repo.rs Normal file
View File

@ -0,0 +1,17 @@
// src/commands/git_repo.rs
use std::fs;
// Read the contents of files in the Git repo (top level only; fs::read_dir is not recursive)
pub fn read_all_git_files(repo_path: &str) -> String {
let mut content = String::new();
for entry in fs::read_dir(repo_path).expect("ディレクトリ読み込み失敗") {
let entry = entry.expect("エントリ読み込み失敗");
let path = entry.path();
if path.is_file() {
if let Ok(file_content) = fs::read_to_string(&path) {
content.push_str(&format!("\n\n# File: {}\n{}", path.display(), file_content));
}
}
}
content
}

277
src/commands/mcp.rs Normal file
View File

@ -0,0 +1,277 @@
// src/commands/mcp.rs
use std::fs;
use std::path::{PathBuf};
use std::process::Command as OtherCommand;
use serde_json::json;
use seahorse::{Command, Context, Flag, FlagType};
use crate::chat::ask_chat;
use crate::git::{git_init, git_status};
use crate::config::ConfigPaths;
use crate::commands::git_repo::read_all_git_files;
use crate::metrics::{load_user_data, save_user_data};
use crate::memory::{log_message};
pub fn mcp_setup() {
let config = ConfigPaths::new();
let dest_dir = config.base_dir.join("mcp");
let repo_url = "https://github.com/microsoft/MCP.git";
println!("📁 MCP ディレクトリ: {}", dest_dir.display());
// 1. git clone (if it doesn't exist yet)
if !dest_dir.exists() {
let status = OtherCommand::new("git")
.args(&["clone", repo_url, dest_dir.to_str().unwrap()])
.status()
.expect("git clone に失敗しました");
assert!(status.success(), "git clone 実行時にエラーが発生しました");
}
let asset_base = PathBuf::from("mcp");
let files_to_copy = vec![
"cli.py",
"setup.py",
"scripts/ask.py",
"scripts/server.py",
"scripts/config.py",
"scripts/summarize.py",
"scripts/context_loader.py",
"scripts/prompt_template.py",
"scripts/memory_store.py",
];
for rel_path in files_to_copy {
let src = asset_base.join(rel_path);
let dst = dest_dir.join(rel_path);
if let Some(parent) = dst.parent() {
let _ = fs::create_dir_all(parent);
}
if let Err(e) = fs::copy(&src, &dst) {
eprintln!("❌ コピー失敗: {} → {}: {}", src.display(), dst.display(), e);
} else {
println!("✅ コピー: {} → {}", src.display(), dst.display());
}
}
// create the venv
let venv_path = dest_dir.join(".venv");
if !venv_path.exists() {
println!("🐍 仮想環境を作成しています...");
let output = OtherCommand::new("python3")
.args(&["-m", "venv", ".venv"])
.current_dir(&dest_dir)
.output()
.expect("venvの作成に失敗しました");
if !output.status.success() {
eprintln!("❌ venv作成エラー: {}", String::from_utf8_lossy(&output.stderr));
return;
}
}
// run `pip install -e .` inside the venv
let pip_path = if cfg!(target_os = "windows") {
dest_dir.join(".venv/Scripts/pip.exe").to_string_lossy().to_string()
} else {
dest_dir.join(".venv/bin/pip").to_string_lossy().to_string()
};
println!("📦 必要なパッケージをインストールしています...");
let output = OtherCommand::new(&pip_path)
.arg("install")
.arg("openai")
.arg("requests")
.arg("fastmcp")
.arg("uvicorn")
.arg("fastapi")
.arg("fastapi_mcp")
.arg("mcp")
.current_dir(&dest_dir)
.output()
.expect("pip install に失敗しました");
if !output.status.success() {
eprintln!(
"❌ pip エラー: {}\n{}",
String::from_utf8_lossy(&output.stderr),
String::from_utf8_lossy(&output.stdout)
);
return;
}
println!("📦 pip install -e . を実行します...");
let output = OtherCommand::new(&pip_path)
.arg("install")
.arg("-e")
.arg(".")
.current_dir(&dest_dir)
.output()
.expect("pip install に失敗しました");
if output.status.success() {
println!("🎉 MCP セットアップが完了しました!");
} else {
eprintln!(
"❌ pip エラー: {}\n{}",
String::from_utf8_lossy(&output.stderr),
String::from_utf8_lossy(&output.stdout)
);
}
}
fn set_api_key_cmd() -> Command {
Command::new("set-api")
.description("OpenAI APIキーを設定")
.usage("mcp set-api --api <API_KEY>")
.flag(Flag::new("api", FlagType::String).description("OpenAI APIキー").alias("a"))
.action(|c: &Context| {
if let Ok(api_key) = c.string_flag("api") {
let config = ConfigPaths::new();
let path = config.base_dir.join("openai.json");
let json_data = json!({ "token": api_key });
if let Err(e) = fs::write(&path, serde_json::to_string_pretty(&json_data).unwrap()) {
eprintln!("❌ ファイル書き込み失敗: {}", e);
} else {
println!("✅ APIキーを保存しました: {}", path.display());
}
} else {
eprintln!("❗ APIキーを --api で指定してください");
}
})
}
fn chat_cmd() -> Command {
Command::new("chat")
.description("チャットで質問を送る")
.usage("mcp chat '質問内容' --host <OLLAMA_HOST> --model <MODEL> [--provider <ollama|openai>] [--api-key <KEY>] [--repo <REPO_URL>]")
.flag(
Flag::new("host", FlagType::String)
.description("OLLAMAホストのURL")
.alias("H"),
)
.flag(
Flag::new("model", FlagType::String)
.description("モデル名 (OLLAMA_MODEL / OPENAI_MODEL)")
.alias("m"),
)
.flag(
Flag::new("provider", FlagType::String)
.description("使用するプロバイダ (ollama / openai)")
.alias("p"),
)
.flag(
Flag::new("api-key", FlagType::String)
.description("OpenAI APIキー")
.alias("k"),
)
.flag(
Flag::new("repo", FlagType::String)
.description("Gitリポジトリのパスを指定 (すべてのコードを読み込む)")
.alias("r"),
)
.action(|c: &Context| {
let config = ConfigPaths::new();
let user_path = config.data_file("json");
let mut user = load_user_data(&user_path);
// if --repo is given, run in codebase-reading mode
if let Ok(repo_url) = c.string_flag("repo") {
let repo_base = config.base_dir.join("repos");
let repo_dir = repo_base.join(sanitize_repo_name(&repo_url));
if !repo_dir.exists() {
println!("📥 Gitリポジトリをクローン中: {}", repo_url);
let status = OtherCommand::new("git")
.args(&["clone", &repo_url, repo_dir.to_str().unwrap()])
.status()
.expect("❌ Gitのクローンに失敗しました");
assert!(status.success(), "Git clone エラー");
} else {
println!("✔ リポジトリはすでに存在します: {}", repo_dir.display());
}
let files = read_all_git_files(repo_dir.to_str().unwrap());
let prompt = format!(
"以下のコードベースを読み込んで、改善案や次のステップを提案してください:\n{}",
files
);
if let Some(response) = ask_chat(c, &prompt) {
println!("💬 提案:\n{}", response);
} else {
eprintln!("❗ 提案が取得できませんでした");
}
return;
}
// normal chat flow (when --repo is not given)
match c.args.get(0) {
Some(question) => {
log_message(&config.base_dir, "user", question);
let response = ask_chat(c, question);
if let Some(ref text) = response {
println!("💬 応答:\n{}", text);
// adjust metrics based on the reply (naive sentiment check)
if text.contains("thank") || text.contains("great") {
user.metrics.trust += 0.05;
} else if text.contains("hate") || text.contains("bad") {
user.metrics.trust -= 0.05;
}
log_message(&config.base_dir, "ai", &text);
save_user_data(&user_path, &user);
} else {
eprintln!("❗ 応答が取得できませんでした");
}
}
None => {
eprintln!("❗ 質問が必要です: mcp chat 'こんにちは'");
}
}
})
}
fn init_cmd() -> Command {
Command::new("init")
.description("Git 初期化")
.usage("mcp init")
.action(|_| {
git_init();
})
}
fn status_cmd() -> Command {
Command::new("status")
.description("Git ステータス表示")
.usage("mcp status")
.action(|_| {
git_status();
})
}
fn setup_cmd() -> Command {
Command::new("setup")
.description("MCP の初期セットアップ")
.usage("mcp setup")
.action(|_| {
mcp_setup();
})
}
pub fn mcp_cmd() -> Command {
Command::new("mcp")
.description("MCP操作コマンド")
.usage("mcp <subcommand>")
.alias("m")
.command(chat_cmd())
.command(init_cmd())
.command(status_cmd())
.command(setup_cmd())
.command(set_api_key_cmd())
}
// convert a repo URL into a filesystem-safe name
fn sanitize_repo_name(repo_url: &str) -> String {
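// e.g. "git@git.syui.ai:ai/gpt" -> "git_git.syui.ai:ai_gpt"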
repo_url.replace("://", "_").replace("/", "_").replace("@", "_")
}

src/commands/mod.rs
View File

@ -1,2 +1,4 @@
pub mod db;
pub mod scheduler;
pub mod mcp;
pub mod git_repo;

src/commands/scheduler.rs
View File

@ -1,29 +1,127 @@
// src/commands/scheduler.rs
use seahorse::{Command, Context};
use std::thread;
use std::time::Duration;
use chrono::Local;
use chrono::{Local, Utc, Timelike};
use crate::metrics::{load_user_data, save_user_data};
use crate::config::ConfigPaths;
use crate::chat::ask_chat;
use rand::prelude::*;
use rand::rng;
fn send_scheduled_message() {
let config = ConfigPaths::new();
let user_path = config.data_file("json");
let mut user = load_user_data(&user_path);
if !user.metrics.can_send {
println!("🚫 送信条件を満たしていないため、スケジュール送信スキップ");
return;
}
// date comparison (limit to once per day)
let today = Local::now().format("%Y-%m-%d").to_string();
if let Some(last_date) = &user.messaging.last_sent_date {
if last_date != &today {
user.messaging.sent_today = false;
}
} else {
user.messaging.sent_today = false;
}
if user.messaging.sent_today {
println!("🔁 本日はすでに送信済みです: {}", today);
return;
}
if let Some(schedule_str) = &user.messaging.schedule_time {
let now = Local::now();
let target: Vec<&str> = schedule_str.split(':').collect();
if target.len() != 2 {
println!("⚠️ schedule_time形式が無効です: {}", schedule_str);
return;
}
let (sh, sm) = (target[0].parse::<u32>(), target[1].parse::<u32>());
if let (Ok(sh), Ok(sm)) = (sh, sm) {
if now.hour() == sh && now.minute() == sm {
if let Some(msg) = user.messaging.templates.choose(&mut rng()) {
println!("💬 自動送信メッセージ: {}", msg);
let dummy_context = Context::new(vec![], None, "".to_string());
ask_chat(&dummy_context, msg);
user.metrics.intimacy += 0.03;
// mark today's message as sent
user.messaging.sent_today = true;
user.messaging.last_sent_date = Some(today);
save_user_data(&user_path, &user);
}
}
}
}
}
pub fn scheduler_cmd() -> Command {
Command::new("scheduler")
.usage("scheduler [interval_sec]")
.alias("s")
.description("定期的に送信条件をチェックし、自発的なメッセージ送信を試みる")
.action(|c: &Context| {
let interval = c.args.get(0)
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(60); // default: every 60 seconds
.unwrap_or(3600); // default: every hour (easier to test)
println!("⏳ スケジューラー開始({interval}秒ごと)...");
println!("⏳ スケジューラー開始({}秒ごと)...", interval);
loop {
let now = Local::now();
println!("🔁 タスク実行中: {}", now.format("%Y-%m-%d %H:%M:%S"));
let config = ConfigPaths::new();
let user_path = config.data_file("json");
let mut user = load_user_data(&user_path);
// roughly where the internals of talk_cmd / save_cmd would be called
// e.g. load_config → update the AI → print
let now = Utc::now();
let elapsed = now.signed_duration_since(user.metrics.last_updated);
let hours = elapsed.num_minutes() as f32 / 60.0;
let speed_factor = if hours > 48.0 {
2.0
} else if hours > 24.0 {
1.5
} else {
1.0
};
user.metrics.trust = (user.metrics.trust - 0.01 * speed_factor).clamp(0.0, 1.0);
user.metrics.intimacy = (user.metrics.intimacy - 0.01 * speed_factor).clamp(0.0, 1.0);
user.metrics.energy = (user.metrics.energy - 0.01 * speed_factor).clamp(0.0, 1.0);
user.metrics.can_send =
user.metrics.trust >= 0.5 &&
user.metrics.intimacy >= 0.5 &&
user.metrics.energy >= 0.5;
user.metrics.last_updated = now;
if user.metrics.can_send {
println!("💡 AIメッセージ送信条件を満たしています信頼:{:.2}, 親密:{:.2}, エネルギー:{:.2}",
user.metrics.trust,
user.metrics.intimacy,
user.metrics.energy
);
send_scheduled_message();
} else {
println!("🤫 条件未達成のため送信スキップ: trust={:.2}, intimacy={:.2}, energy={:.2}",
user.metrics.trust,
user.metrics.intimacy,
user.metrics.energy
);
}
save_user_data(&user_path, &user);
thread::sleep(Duration::from_secs(interval));
}
})
}

42
src/git.rs Normal file
View File

@ -0,0 +1,42 @@
// src/git.rs
use std::process::Command;
pub fn git_status() {
run_git_command(&["status"]);
}
pub fn git_init() {
run_git_command(&["init"]);
}
#[allow(dead_code)]
pub fn git_commit(message: &str) {
run_git_command(&["add", "."]);
run_git_command(&["commit", "-m", message]);
}
#[allow(dead_code)]
pub fn git_push() {
run_git_command(&["push"]);
}
#[allow(dead_code)]
pub fn git_pull() {
run_git_command(&["pull"]);
}
#[allow(dead_code)]
pub fn git_branch() {
run_git_command(&["branch"]);
}
fn run_git_command(args: &[&str]) {
let status = Command::new("git")
.args(args)
.status()
.expect("git コマンドの実行に失敗しました");
if !status.success() {
eprintln!("⚠️ git コマンドに失敗しました: {:?}", args);
}
}

src/main.rs
View File

@ -6,6 +6,10 @@ mod cli;
mod utils;
mod commands;
mod config;
mod git;
mod chat;
mod metrics;
mod memory;
use cli::cli_app;
use seahorse::App;

49
src/memory.rs Normal file
View File

@ -0,0 +1,49 @@
// src/memory.rs
use chrono::{DateTime, Local, Utc};
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{BufReader, BufWriter};
use std::path::PathBuf;
#[derive(Debug, Serialize, Deserialize)]
pub struct MemoryEntry {
pub timestamp: DateTime<Utc>,
pub sender: String,
pub message: String,
}
pub fn log_message(base_dir: &PathBuf, sender: &str, message: &str) {
let now_utc = Utc::now();
let date_str = Local::now().format("%Y-%m-%d").to_string();
let mut file_path = base_dir.clone();
file_path.push("memory");
let _ = fs::create_dir_all(&file_path);
file_path.push(format!("{}.json", date_str));
let new_entry = MemoryEntry {
timestamp: now_utc,
sender: sender.to_string(),
message: message.to_string(),
};
let mut entries = if file_path.exists() {
let file = File::open(&file_path).expect("💥 メモリファイルの読み込み失敗");
let reader = BufReader::new(file);
serde_json::from_reader(reader).unwrap_or_else(|_| vec![])
} else {
vec![]
};
entries.push(new_entry);
let file = File::create(&file_path).expect("💥 メモリファイルの書き込み失敗");
let writer = BufWriter::new(file);
serde_json::to_writer_pretty(writer, &entries).expect("💥 JSONの書き込み失敗");
}
// usage example (inside ask_chat):
// log_message(&config.base_dir, "user", question);
// log_message(&config.base_dir, "ai", &response);

147
src/metrics.rs Normal file
View File

@ -0,0 +1,147 @@
// src/metrics.rs
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use crate::config::ConfigPaths;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
pub trust: f32,
pub intimacy: f32,
pub energy: f32,
pub can_send: bool,
pub last_updated: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Personality {
pub kind: String,
pub strength: f32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
pub trust: f32,
pub intimacy: f32,
pub curiosity: f32,
pub threshold: f32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Environment {
pub luck_today: f32,
pub luck_history: Vec<f32>,
pub level: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Messaging {
pub enabled: bool,
pub schedule_time: Option<String>,
pub decay_rate: f32,
pub templates: Vec<String>,
pub sent_today: bool, // added
pub last_sent_date: Option<String>, // added
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
pub recent_messages: Vec<String>,
pub long_term_notes: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserData {
pub personality: Personality,
pub relationship: Relationship,
pub environment: Environment,
pub messaging: Messaging,
pub last_interaction: DateTime<Utc>,
pub memory: Memory,
pub metrics: Metrics,
}
impl Metrics {
pub fn decay(&mut self) {
let now = Utc::now();
let hours = (now - self.last_updated).num_minutes() as f32 / 60.0;
self.trust = decay_param(self.trust, hours);
self.intimacy = decay_param(self.intimacy, hours);
self.energy = decay_param(self.energy, hours);
self.can_send = self.trust >= 0.5 && self.intimacy >= 0.5 && self.energy >= 0.5;
self.last_updated = now;
}
}
pub fn load_user_data(path: &Path) -> UserData {
let config = ConfigPaths::new();
let example_path = Path::new("example.json");
config.ensure_file_exists("json", example_path);
if !path.exists() {
return UserData {
personality: Personality {
kind: "positive".into(),
strength: 0.8,
},
relationship: Relationship {
trust: 0.2,
intimacy: 0.6,
curiosity: 0.5,
threshold: 1.5,
},
environment: Environment {
luck_today: 0.9,
luck_history: vec![0.9, 0.9, 0.9],
level: 1,
},
messaging: Messaging {
enabled: true,
schedule_time: Some("08:00".to_string()),
decay_rate: 0.1,
templates: vec![
"おはよう!今日もがんばろう!".to_string(),
"ねえ、話したいことがあるの。".to_string(),
],
sent_today: false,
last_sent_date: None,
},
last_interaction: Utc::now(),
memory: Memory {
recent_messages: vec![],
long_term_notes: vec![],
},
metrics: Metrics {
trust: 0.5,
intimacy: 0.5,
energy: 0.5,
can_send: true,
last_updated: Utc::now(),
},
};
}
let content = fs::read_to_string(path).expect("user.json の読み込みに失敗しました");
serde_json::from_str(&content).expect("user.json のパースに失敗しました")
}
pub fn save_user_data(path: &Path, data: &UserData) {
let content = serde_json::to_string_pretty(data).expect("user.json のシリアライズ失敗");
fs::write(path, content).expect("user.json の書き込みに失敗しました");
}
pub fn update_metrics_decay() -> Metrics {
let config = ConfigPaths::new();
let path = config.base_dir.join("user.json");
let mut data = load_user_data(&path);
data.metrics.decay();
save_user_data(&path, &data);
data.metrics
}
fn decay_param(value: f32, hours: f32) -> f32 {
let decay_rate = 0.05;
(value * (1.0f32 - decay_rate).powf(hours)).clamp(0.0, 1.0)
}
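For intuition, `decay_param` is plain exponential decay per elapsed hour, clamped to [0, 1]; the same curve in Python (a sketch; 0.05 is the hard-coded rate above):

```python
# Sketch of decay_param: value * 0.95^hours, clamped to [0, 1].
def decay_param(value: float, hours: float, rate: float = 0.05) -> float:
    return max(0.0, min(1.0, value * (1.0 - rate) ** hours))

# A metric at 1.0 drops below the 0.5 send threshold after ~13.5 hours:
print(decay_param(1.0, 13.0))  # ≈ 0.513
print(decay_param(1.0, 14.0))  # ≈ 0.488
```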

View File

@ -1,42 +0,0 @@
use std::env;
use std::process::{Command, Stdio};
use std::io::{self, Write};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
eprintln!("Usage: langchain_cli <prompt>");
std::process::exit(1);
}
let prompt = &args[1];
// Simulate a pipeline stage: e.g., tokenization, reasoning, response generation
let stages = vec!["Tokenize", "Reason", "Generate"];
for stage in &stages {
println!("[Stage: {}] Processing...", stage);
}
// Example call to Python-based LangChain (assuming you have a script or API to call)
// For placeholder purposes, we echo the prompt back.
let output = Command::new("python3")
.arg("-c")
.arg(format!("print(\"LangChain Agent Response for: {}\")", prompt))
.stdout(Stdio::piped())
.spawn()
.expect("failed to execute process")
.wait_with_output()
.expect("failed to wait on child");
io::stdout().write_all(&output.stdout).unwrap();
}
/*
TODO (for future LangChain-style pipeline):
1. Implement trait-based agent components: Tokenizer, Retriever, Reasoner, Generator.
2. Allow config via YAML or TOML to define chain flow.
3. Async pipeline support with Tokio.
4. Optional integration with LLM APIs (OpenAI, Ollama, etc).
5. Rust-native vector search (e.g. using `tantivy`, `qdrant-client`).
*/

src/model.rs
View File

@ -1,133 +0,0 @@
#[derive(Debug, Serialize, Deserialize)]
pub struct RelationalAutonomousAI {
pub system_name: String,
pub description: String,
pub core_components: CoreComponents,
pub extensions: Extensions,
pub note: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CoreComponents {
pub personality: Personality,
pub relationship: Relationship,
pub environment: Environment,
pub memory: Memory,
pub message_trigger: MessageTrigger,
pub message_generation: MessageGeneration,
pub state_transition: StateTransition,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Personality {
pub r#type: String,
pub variants: Vec<String>,
pub parameters: PersonalityParameters,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PersonalityParameters {
pub message_trigger_style: String,
pub decay_rate_modifier: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Relationship {
pub parameters: Vec<String>,
pub properties: RelationshipProperties,
pub decay_function: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct RelationshipProperties {
pub persistent: bool,
pub hidden: bool,
pub irreversible: bool,
pub decay_over_time: bool,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Environment {
pub daily_luck: DailyLuck,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DailyLuck {
pub r#type: String,
pub range: Vec<f32>,
pub update: String,
pub streak_mechanism: StreakMechanism,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct StreakMechanism {
pub trigger: String,
pub effect: String,
pub chance: f32,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Memory {
pub long_term_memory: String,
pub short_term_context: String,
pub usage_in_generation: bool,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct MessageTrigger {
pub condition: TriggerCondition,
pub timing: TriggerTiming,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct TriggerCondition {
pub relationship_threshold: String,
pub time_decay: bool,
pub environment_luck: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct TriggerTiming {
pub based_on: Vec<String>,
pub modifiers: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct MessageGeneration {
pub style_variants: Vec<String>,
pub influenced_by: Vec<String>,
pub llm_integration: bool,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct StateTransition {
pub states: Vec<String>,
pub transitions: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Extensions {
pub persistence: Persistence,
pub api: Api,
pub scheduler: Scheduler,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Persistence {
pub database: String,
pub storage_items: Vec<String>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Api {
pub llm: String,
pub mode: String,
pub external_event_trigger: bool,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Scheduler {
pub async_event_loop: bool,
pub interval_check: i32,
pub time_decay_check: bool,
}

File diff suppressed because one or more lines are too long

View File

@ -1,46 +0,0 @@
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::{BufReader, Write};
use std::time::{SystemTime, UNIX_EPOCH};
mod model;
use model::RelationalAutonomousAI;
fn load_config(path: &str) -> std::io::Result<RelationalAutonomousAI> {
let file = File::open(path)?;
let reader = BufReader::new(file);
let config: RelationalAutonomousAI = serde_json::from_reader(reader)?;
Ok(config)
}
fn save_config(config: &RelationalAutonomousAI, path: &str) -> std::io::Result<()> {
let mut file = File::create(path)?;
let json = serde_json::to_string_pretty(config)?;
file.write_all(json.as_bytes())?;
Ok(())
}
fn should_send_message(config: &RelationalAutonomousAI) -> bool {
// simple send condition: relationship tracks trust, and daily_luck's upper bound is at least 0.8
config.core_components.relationship.parameters.contains(&"trust".to_string())
&& config.core_components.environment.daily_luck.range[1] >= 0.8
}
fn main() -> std::io::Result<()> {
let path = "config.json";
let mut config = load_config(path)?;
if should_send_message(&config) {
println!("💌 メッセージを送信できます: {:?}", config.core_components.personality.r#type);
// example state change: record a transition after sending a message
// (transitions is a String, so append text rather than push a char)
config.core_components.state_transition.transitions.push_str("message_sent");
save_config(&config, path)?;
} else {
println!("😶 まだ送信条件に達していません。");
}
Ok(())
}