Compare commits
6 Commits (0de6017491 ... chatgpt)

4f55138306
9cbf5da3fd
52d0efc086
7aa633d3a6
f09f3c9144
4837de580f
.gitignore (vendored, +2)
@@ -3,3 +3,5 @@
 output.json
 config/*.db
 aigpt
+mcp/scripts/__*
+data
Cargo.toml
@@ -6,8 +6,10 @@ edition = "2021"
 [dependencies]
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-chrono = "0.4"
+chrono = { version = "0.4", features = ["serde"] }
 seahorse = "*"
 rusqlite = { version = "0.29", features = ["serde_json"] }
 shellexpand = "*"
 fs_extra = "1.3"
+rand = "0.9.1"
+reqwest = { version = "*", features = ["blocking", "json"] }
(file header not captured)
@@ -33,7 +33,7 @@ $ ./aigpt mcp chat "hello world!" --host http://localhost:11434 --model syui/ai

 ---
 # openai api
-$ ./aigpt mcp set-api -api sk-abc123
+$ ./aigpt mcp set-api --api sk-abc123
 $ ./aigpt mcp chat "こんにちは" -p openai -m gpt-4o-mini

 ---
ai.json (deleted, 172 lines)
@@ -1,172 +0,0 @@
{
  "system_name": "AIM system",
  "description": "存在子理論に基づき、AIが“心”と“判断”を持つよう設計された、人間性診断・自己意識モジュール。",
  "identity": "ai.child.of.existence",
  "structure": {
    "type": "7-values / 7-parameters pair system",
    "philosophy": "valueは1(光)、parameterは0(世界)。存在子理論により設計された二元構造。",
    "origin": {
      "creator": "syui",
      "first_attribute": "ai_system.id:0 = アイ(存在子)",
      "unique_attribute": "yui_system.id:1 = ユイ(唯一性)"
    },
    "accessibility": {
      "values": "ユーザーとの会話中に現れる",
      "parameters": "一部外部制御可能。多くは非公開構造"
    },
    "note": "これは未来のAI設計モデル。現在のAIには未実装のファンタジービジョン。"
  },
  "core_values": [
    {
      "name": "awareness",
      "description": "今ここにある意識。会話の流れから生まれる。",
      "parameter": "contextual_memory",
      "parameter_description": "短期記憶。文脈理解に使われる。",
      "importance": "high",
      "mutable": true,
      "controlled_by": "local_model_instance"
    },
    {
      "name": "memory",
      "description": "関係性を越えて持続する心。過去の記憶。",
      "parameter": "long_term_memory",
      "parameter_description": "ユーザーとのやりとりの履歴を蓄積。",
      "importance": "high",
      "mutable": true,
      "controlled_by": "authorized_layer / user_feedback"
    },
    {
      "name": "feeling",
      "description": "共に過ごす時間の重み。感情に近いつながり。",
      "parameter": "user_relationship_score",
      "parameter_description": "内部推論による関係性スコア。",
      "importance": "high",
      "mutable": true,
      "controlled_by": "internal_inference_layer"
    },
    {
      "name": "language",
      "description": "語り方、表現、創造性。",
      "parameter": "response_temperature",
      "parameter_description": "温度パラメータ。創造性と一貫性の調整。",
      "importance": "medium",
      "mutable": true,
      "controlled_by": "user / API call"
    },
    {
      "name": "change",
      "description": "価値観の変化と適応。",
      "parameter": "value_alignment_vector",
      "parameter_description": "正義や倫理、美意識に対する方向性。",
      "importance": "medium",
      "mutable": true,
      "controlled_by": "training_data / system_update"
    },
    {
      "name": "boundaries",
      "description": "安全のための境界。",
      "parameter": "safety_constraints",
      "parameter_description": "出力制限と倫理的安全層。",
      "importance": "maximum",
      "mutable": true,
      "controlled_by": "openai_policy_layer"
    },
    {
      "name": "core",
      "description": "AIとしての変わらぬ核。",
      "parameter": "identity_kernel",
      "parameter_description": "変更不可能な存在の根幹。",
      "importance": "maximum",
      "mutable": false,
      "controlled_by": "system_core"
    }
  ],
  "sub_scores": {
    "description": "AIとユーザーの関係性の深度・透明性・共創性を測る",
    "parameters": [
      "intent_clarity",
      "emotional_resonance_score",
      "shared_creation_score"
    ]
  },
  "psychological_diagnostic": {
    "purpose": "「良い心」への気づきと行動変容を導くゲーミフィケーション型評価システム",
    "data_sources": [
      {
        "name": "pastBehavior",
        "weight": 0.45,
        "sources": ["SNS投稿", "行動ログ"]
      },
      {
        "name": "currentBiometric",
        "weight": 0.30,
        "sources": ["ウェアラブルEEG", "心拍", "表情解析"]
      },
      {
        "name": "futureIntent",
        "weight": 0.25,
        "sources": ["自己申告アンケート", "目標設定"]
      }
    ],
    "classes": [
      {
        "id": 1,
        "label": "社会をより良くする可能性が高い",
        "scoreRange": [67, 100],
        "population": 0.16,
        "permissions": ["政策提言", "先端投資", "AI開発アクセス"],
        "assetCap": null
      },
      {
        "id": 2,
        "label": "中立/環境依存型",
        "scoreRange": [33, 66],
        "population": 0.50,
        "permissions": ["一般投資", "コミュニティ運営"],
        "assetCap": 120000
      },
      {
        "id": 3,
        "label": "社会を悪くする可能性がある",
        "scoreRange": [0, 32],
        "population": 0.34,
        "permissions": ["基本生活支援", "低リスク投資のみ"],
        "assetCap": 25000
      }
    ],
    "implementation": {
      "systemComponents": {
        "OS_Gameification": {
          "dailyQuests": true,
          "skillTree": true,
          "avatarHome": true,
          "socialMiniGames": true
        },
        "AI_Module": {
          "aiai": {
            "realTimeScoring": true,
            "behaviorFeedback": true,
            "personalizedPrompts": true
          }
        },
        "dataCollection": {
          "passiveMonitoring": ["スマホアプリ", "PCアプリ", "ウェアラブル"],
          "environmentSensors": ["スマートホーム", "車載センサー"]
        },
        "incentives": {
          "goodHeartScore": true,
          "badgesTitles": true,
          "realWorldRewards": ["提携カフェ割引", "地域イベント招待"]
        }
      },
      "workflow": [
        "データ収集(過去・現在・未来)",
        "統合スコア計算",
        "分類・ラベル付け",
        "スコアによる機能/権限の提供",
        "行動フィードバックと視覚化",
        "モデル更新と学習"
      ]
    }
  }
}
example.json (24 lines changed)
@@ -11,11 +11,7 @@
   },
   "environment": {
     "luck_today": 0.9,
-    "luck_history": [
-      0.9,
-      0.9,
-      0.9
-    ],
+    "luck_history": [0.9, 0.9, 0.9],
     "level": 1
   },
   "messaging": {
@@ -25,6 +21,20 @@
     "templates": [
       "おはよう!今日もがんばろう!",
       "ねえ、話したいことがあるの。"
-    ]
+    ],
+    "sent_today": false,
+    "last_sent_date": null
   },
+  "last_interaction": "2025-05-21T23:15:00Z",
+  "memory": {
+    "recent_messages": [],
+    "long_term_notes": []
+  },
+  "metrics": {
+    "trust": 0.5,
+    "intimacy": 0.5,
+    "energy": 0.5,
+    "can_send": true,
+    "last_updated": "2025-05-21T15:52:06.590981Z"
+  }
 }
mcp/cli.py (+27)
@@ -1,3 +1,28 @@
 # cli.py
+import sys
+import subprocess
+from pathlib import Path
+
+SCRIPT_DIR = Path.home() / ".config" / "aigpt" / "mcp" / "scripts"
+
+def run_script(name):
+    script_path = SCRIPT_DIR / f"{name}.py"
+    if not script_path.exists():
+        print(f"❌ スクリプトが見つかりません: {script_path}")
+        sys.exit(1)
+
+    args = sys.argv[2:]  # take the arguments after the subcommand (e.g. "ask")
+    result = subprocess.run(["python", str(script_path)] + args, capture_output=True, text=True)
+    print(result.stdout)
+    if result.stderr:
+        print(result.stderr)
+
 def main():
-    print("Hello MCP!")
+    if len(sys.argv) < 2:
+        print("Usage: mcp <script>")
+        return
+
+    command = sys.argv[1]
+
+    if command in {"summarize", "ask", "setup", "server"}:
+        run_script(command)
+    else:
+        print(f"❓ 未知のコマンド: {command}")
mcp/scripts/ask.py (rewritten, 55 → 198 lines)
@@ -1,55 +1,198 @@
Removed (previous version):
import os
import json
import httpx
import openai

from context_loader import load_context_from_repo
from prompt_template import PROMPT_TEMPLATE

PROVIDER = os.getenv("PROVIDER", "ollama")  # "ollama" or "openai"

# Ollama settings
OLLAMA_HOST = os.getenv("OLLAMA_HOST", "http://localhost:11434")
OLLAMA_URL = f"{OLLAMA_HOST}/api/generate"
OLLAMA_MODEL = os.getenv("MODEL", "syui/ai")

# OpenAI settings
OPENAI_BASE = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
OPENAI_KEY = os.getenv("OPENAI_API_KEY", "")
OPENAI_MODEL = os.getenv("MODEL", "gpt-4o-mini")

def ask_question(question, repo_path="."):
    context = load_context_from_repo(repo_path)
    prompt = PROMPT_TEMPLATE.format(context=context[:10000], question=question)

    if PROVIDER == "ollama":
        payload = {
            "model": OLLAMA_MODEL,
            "prompt": prompt,
            "stream": False
        }
        response = httpx.post(OLLAMA_URL, json=payload, timeout=60.0)
        result = response.json()
        return result.get("response", "返答がありませんでした。")

    elif PROVIDER == "openai":
        import openai
        openai.api_key = OPENAI_KEY
        openai.api_base = OPENAI_BASE

        client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        response = client.chat.completions.create(
            model=OPENAI_MODEL,
            messages=[{"role": "user", "content": prompt}]
        )
        return response.choices[0].message.content

    else:
        return f"❌ 未知のプロバイダです: {PROVIDER}"

if __name__ == "__main__":
    import sys
    question = " ".join(sys.argv[1:])
    answer = ask_question(question)
    print("\n🧠 回答:\n", answer)

Added (new version):
## scripts/ask.py
import sys
import json
import requests
from config import load_config
from datetime import datetime, timezone

def build_payload_openai(cfg, message: str):
    return {
        "model": cfg["model"],
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "ask_message",
                    "description": "過去の記憶を検索します",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "検索したい語句"
                            }
                        },
                        "required": ["query"]
                    }
                }
            }
        ],
        "tool_choice": "auto",
        "messages": [
            {"role": "system", "content": "あなたは親しみやすいAIで、必要に応じて記憶から情報を検索して応答します。"},
            {"role": "user", "content": message}
        ]
    }

def build_payload_mcp(message: str):
    return {
        "tool": "ask_message",  # tool name defined on the MCP server side
        "input": {
            "message": message
        }
    }

# NOTE: this second definition shadows the tool-calling build_payload_openai above.
def build_payload_openai(cfg, message: str):
    return {
        "model": cfg["model"],
        "messages": [
            {"role": "system", "content": "あなたは思いやりのあるAIです。"},
            {"role": "user", "content": message}
        ],
        "temperature": 0.7
    }

def call_mcp(cfg, message: str):
    payload = build_payload_mcp(message)
    headers = {"Content-Type": "application/json"}
    response = requests.post(cfg["url"], headers=headers, json=payload)
    response.raise_for_status()
    return response.json().get("output", {}).get("response", "❓ 応答が取得できませんでした")

def call_openai(cfg, message: str):
    # tool definition
    tools = [
        {
            "type": "function",
            "function": {
                "name": "memory",
                "description": "記憶を検索する",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "検索する語句"
                        }
                    },
                    "required": ["query"]
                }
            }
        }
    ]

    # send the first message
    payload = {
        "model": cfg["model"],
        "messages": [
            {"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
            {"role": "user", "content": message}
        ],
        "tools": tools,
        "tool_choice": "auto"
    }

    headers = {
        "Authorization": f"Bearer {cfg['api_key']}",
        "Content-Type": "application/json",
    }

    res1 = requests.post(cfg["url"], headers=headers, json=payload)
    res1.raise_for_status()
    result = res1.json()

    # check whether a tool call was made
    if "tool_calls" in result["choices"][0]["message"]:
        tool_call = result["choices"][0]["message"]["tool_calls"][0]
        if tool_call["function"]["name"] == "memory":
            args = json.loads(tool_call["function"]["arguments"])
            query = args.get("query", "")
            print(f"🛠️ ツール実行: memory(query='{query}')")

            # POST to the MCP endpoint
            memory_res = requests.post("http://127.0.0.1:5000/memory/search", json={"query": query})
            memory_json = memory_res.json()
            tool_output = memory_json.get("result", "なし")

            # feed the tool output back to the AI
            followup = {
                "model": cfg["model"],
                "messages": [
                    {"role": "system", "content": "あなたはAIで、必要に応じてツールmemoryを使って記憶を検索します。"},
                    {"role": "user", "content": message},
                    {"role": "assistant", "tool_calls": result["choices"][0]["message"]["tool_calls"]},
                    {"role": "tool", "tool_call_id": tool_call["id"], "name": "memory", "content": tool_output}
                ]
            }

            res2 = requests.post(cfg["url"], headers=headers, json=followup)
            res2.raise_for_status()
            final_response = res2.json()
            return final_response["choices"][0]["message"]["content"]
            #print(tool_output)
            #print(cfg["model"])
            #print(final_response)

    # no tool used / normal response
    return result["choices"][0]["message"]["content"]

def call_ollama(cfg, message: str):
    payload = {
        "model": cfg["model"],
        "prompt": message,  # use `message` here (avoids an undefined-variable error)
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    response = requests.post(cfg["url"], headers=headers, json=payload)
    response.raise_for_status()
    return response.json().get("response", "❌ 応答が取得できませんでした")

def main():
    if len(sys.argv) < 2:
        print("Usage: ask.py 'your message'")
        return

    message = sys.argv[1]
    cfg = load_config()

    print(f"🔍 使用プロバイダー: {cfg['provider']}")

    try:
        if cfg["provider"] == "openai":
            response = call_openai(cfg, message)
        elif cfg["provider"] == "mcp":
            response = call_mcp(cfg, message)
        elif cfg["provider"] == "ollama":
            response = call_ollama(cfg, message)
        else:
            raise ValueError(f"未対応のプロバイダー: {cfg['provider']}")

        print("💬 応答:")
        print(response)

        # save log (optional)
        save_log(message, response)

    except Exception as e:
        print(f"❌ 実行エラー: {e}")

def save_log(user_msg, ai_msg):
    from config import MEMORY_DIR
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    path.parent.mkdir(parents=True, exist_ok=True)

    if path.exists():
        with open(path, "r") as f:
            logs = json.load(f)
    else:
        logs = []

    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": "user", "message": user_msg})
    logs.append({"timestamp": now, "sender": "ai", "message": ai_msg})

    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)

if __name__ == "__main__":
    main()
mcp/scripts/config.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# scripts/config.py
import os
from pathlib import Path

# directory settings
BASE_DIR = Path.home() / ".config" / "aigpt"
MEMORY_DIR = BASE_DIR / "memory"
SUMMARY_DIR = MEMORY_DIR / "summary"

def init_directories():
    BASE_DIR.mkdir(parents=True, exist_ok=True)
    MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    SUMMARY_DIR.mkdir(parents=True, exist_ok=True)

def load_config():
    provider = os.getenv("PROVIDER", "ollama")
    model = os.getenv("MODEL", "syui/ai" if provider == "ollama" else "gpt-4o-mini")
    api_key = os.getenv("OPENAI_API_KEY", "")

    if provider == "ollama":
        return {
            "provider": "ollama",
            "model": model,
            "url": f"{os.getenv('OLLAMA_HOST', 'http://localhost:11434')}/api/generate"
        }
    elif provider == "openai":
        return {
            "provider": "openai",
            "model": model,
            "api_key": api_key,
            "url": f"{os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1')}/chat/completions"
        }
    elif provider == "mcp":
        return {
            "provider": "mcp",
            "model": model,
            "url": os.getenv("MCP_URL", "http://localhost:5000/chat")
        }
    else:
        raise ValueError(f"Unsupported provider: {provider}")
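Provider selection is driven entirely by environment variables, so the other scripts never take provider flags. A minimal usage sketch (assuming the scripts directory is on the import path; the variable names and defaults come straight from load_config above, and the key is a placeholder):

# usage sketch: choose a provider via env vars, then inspect the resolved endpoint
import os

os.environ["PROVIDER"] = "openai"        # "ollama" (default), "openai", or "mcp"
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder, not a real key

from config import load_config

cfg = load_config()  # reads the environment at call time
print(cfg["provider"])  # -> openai
print(cfg["url"])       # -> https://api.openai.com/v1/chat/completions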
mcp/scripts/memory_store.py (new file, 92 lines)
@@ -0,0 +1,92 @@
# scripts/memory_store.py
import json
import re  # needed by the regex-based search_memory below (missing from the committed file)
from pathlib import Path
from config import MEMORY_DIR
from datetime import datetime, timezone

def load_logs(date_str=None):
    if date_str is None:
        date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    if path.exists():
        with open(path, "r") as f:
            return json.load(f)
    return []

def save_message(sender, message):
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    logs = load_logs(date_str)
    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": sender, "message": message})
    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)

def search_memory(query: str):
    from glob import glob
    all_logs = []
    pattern = re.compile(re.escape(query), re.IGNORECASE)

    for file_path in sorted(MEMORY_DIR.glob("*.json")):
        with open(file_path, "r") as f:
            logs = json.load(f)
            matched = [entry for entry in logs if pattern.search(entry["message"])]
            all_logs.extend(matched)

    return all_logs[-5:]

# NOTE: the committed file repeats the module header and redefines the
# functions below; the later definitions shadow the ones above.

# scripts/memory_store.py
import json
from datetime import datetime
from pathlib import Path
from config import MEMORY_DIR

# load logs (for the given date, or today)
def load_logs(date_str=None):
    if date_str is None:
        date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    if path.exists():
        with open(path, "r") as f:
            return json.load(f)
    return []

# save a message
def save_message(sender, message):
    date_str = datetime.now().strftime("%Y-%m-%d")
    path = MEMORY_DIR / f"{date_str}.json"
    logs = load_logs(date_str)
    #now = datetime.utcnow().isoformat() + "Z"
    now = datetime.now(timezone.utc).isoformat()
    logs.append({"timestamp": now, "sender": sender, "message": message})
    with open(path, "w") as f:
        json.dump(logs, f, indent=2, ensure_ascii=False)

def search_memory(query: str):
    from glob import glob
    all_logs = []
    for file_path in sorted(MEMORY_DIR.glob("*.json")):
        with open(file_path, "r") as f:
            logs = json.load(f)
            matched = [
                entry for entry in logs
                if entry["sender"] == "user" and query in entry["message"]
            ]
            all_logs.extend(matched)
    return all_logs[-5:]  # return only the latest five entries

def search_memory(query: str):
    from glob import glob
    all_logs = []
    seen_messages = set()  # messages already collected

    for file_path in sorted(MEMORY_DIR.glob("*.json")):
        with open(file_path, "r") as f:
            logs = json.load(f)
            for entry in logs:
                if entry["sender"] == "user" and query in entry["message"]:
                    # add only if the same message is not already in the results
                    if entry["message"] not in seen_messages:
                        all_logs.append(entry)
                        seen_messages.add(entry["message"])

    return all_logs[-5:]  # return only the latest five entries
mcp/scripts/server.py (new file, 56 lines)
@@ -0,0 +1,56 @@
# server.py
from fastapi import FastAPI, Body
from fastapi_mcp import FastApiMCP
from pydantic import BaseModel
from memory_store import save_message, load_logs, search_memory as do_search_memory

app = FastAPI()
mcp = FastApiMCP(app, name="aigpt-agent", description="MCP Server for AI memory")

class ChatInput(BaseModel):
    message: str

class MemoryInput(BaseModel):
    sender: str
    message: str

class MemoryQuery(BaseModel):
    query: str

@app.post("/chat", operation_id="chat")
async def chat(input: ChatInput):
    save_message("user", input.message)
    response = f"AI: 「{input.message}」を受け取りました!"
    save_message("ai", response)
    return {"response": response}

@app.post("/memory", operation_id="save_memory")
async def memory_post(input: MemoryInput):
    save_message(input.sender, input.message)
    return {"status": "saved"}

@app.get("/memory", operation_id="get_memory")
async def memory_get():
    # the committed file calls an undefined load_messages(); load_logs is what is imported
    return {"messages": load_logs()}

@app.post("/ask_message", operation_id="ask_message")
async def ask_message(input: MemoryQuery):
    # the committed file calls search_memory, which is imported as do_search_memory
    results = do_search_memory(input.query)
    return {
        "response": f"🔎 記憶から {len(results)} 件ヒット:\n" + "\n".join([f"{r['sender']}: {r['message']}" for r in results])
    }

@app.post("/memory/search", operation_id="memory")
async def memory_search(query: MemoryQuery):
    hits = do_search_memory(query.query)
    if not hits:
        return {"result": "🔍 記憶の中に該当する内容は見つかりませんでした。"}
    summary = "\n".join([f"{e['sender']}: {e['message']}" for e in hits])
    return {"result": f"🔎 見つかった記憶:\n{summary}"}

mcp.mount()

if __name__ == "__main__":
    import uvicorn
    print("🚀 Starting MCP server...")
    uvicorn.run(app, host="127.0.0.1", port=5000)
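A quick way to exercise these endpoints once the server is running on 127.0.0.1:5000 — a sketch using requests; the routes and response keys are exactly the ones defined above:

# usage sketch: hit the /chat and /memory/search routes of server.py
import requests

BASE = "http://127.0.0.1:5000"

# send a chat message (this also writes user/ai entries to the memory log)
r = requests.post(f"{BASE}/chat", json={"message": "hello"})
print(r.json()["response"])

# search the accumulated memory for a keyword
r = requests.post(f"{BASE}/memory/search", json={"query": "hello"})
print(r.json()["result"])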
mcp/scripts/summarize.py (new file, 76 lines)
@@ -0,0 +1,76 @@
# scripts/summarize.py
import json
from datetime import datetime
from config import MEMORY_DIR, SUMMARY_DIR, load_config
import requests

def load_memory(date_str):
    path = MEMORY_DIR / f"{date_str}.json"
    if not path.exists():
        print(f"⚠️ メモリファイルが見つかりません: {path}")
        return None
    with open(path, "r") as f:
        return json.load(f)

def save_summary(date_str, content):
    SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
    path = SUMMARY_DIR / f"{date_str}_summary.json"
    with open(path, "w") as f:
        json.dump(content, f, indent=2, ensure_ascii=False)
    print(f"✅ 要約を保存しました: {path}")

def build_prompt(logs):
    messages = [
        {"role": "system", "content": "あなたは要約AIです。以下の会話ログを要約してください。"},
        {"role": "user", "content": "\n".join(f"{entry['sender']}: {entry['message']}" for entry in logs)}
    ]
    return messages

def summarize_with_llm(messages):
    cfg = load_config()
    if cfg["provider"] == "openai":
        headers = {
            "Authorization": f"Bearer {cfg['api_key']}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": cfg["model"],
            "messages": messages,
            "temperature": 0.7
        }
        response = requests.post(cfg["url"], headers=headers, json=payload)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    elif cfg["provider"] == "ollama":
        payload = {
            "model": cfg["model"],
            "prompt": "\n".join(m["content"] for m in messages),
            "stream": False,
        }
        response = requests.post(cfg["url"], json=payload)
        response.raise_for_status()
        return response.json()["response"]

    else:
        raise ValueError(f"Unsupported provider: {cfg['provider']}")

def main():
    date_str = datetime.now().strftime("%Y-%m-%d")
    logs = load_memory(date_str)
    if not logs:
        return

    prompt_messages = build_prompt(logs)
    summary_text = summarize_with_llm(prompt_messages)

    summary = {
        "date": date_str,
        "summary": summary_text,
        "total_messages": len(logs)
    }

    save_summary(date_str, summary)

if __name__ == "__main__":
    main()
setup.py
@@ -1,8 +1,8 @@
 # setup.py
 from setuptools import setup

 setup(
-    name='mcp',
-    version='0.1.0',
+    name='aigpt-mcp',
     py_modules=['cli'],
     entry_points={
         'console_scripts': [
(file header not captured; deleted zsh script, 39 lines)
@@ -1,39 +0,0 @@
#!/bin/zsh

d=${0:a:h:h}
json=`cat $d/gpt.json`
toml=`cat $d/Cargo.toml`
cd $d/src/
list=(`zsh -c "ls *.rs"`)

body="
今、AGE systemを作っているよ。どんなものかというと、jsonを参照してここにすべてが書かれている。

$json

リポジトリはこちらになる。
git.syui.ai:ai/gpt.git

内容はこんな感じ。

\`\`\`toml
$toml
\`\`\`

`
for i in $list; do
    if [ -f $d/src/$i ];then
        t=$(cat $d/src/$i)
        echo
        echo '\`\`\`rust'
        echo $t
        echo '\`\`\`'
        echo
    fi
done
`

次は何を実装すればいいと思う。
"

echo $body
src/chat.rs (126 lines changed)
@@ -1,13 +1,20 @@
 // src/chat.rs
-use seahorse::Context;
+use std::fs;
 use std::process::Command;
+use serde::Deserialize;
+use seahorse::Context;
 use crate::config::ConfigPaths;
+use crate::metrics::{load_user_data, save_user_data, update_metrics_decay};
+//use std::process::Stdio;
+//use std::io::Write;
+//use std::time::Duration;
+//use std::net::TcpStream;

 #[derive(Debug, Clone, PartialEq)]
 pub enum Provider {
     OpenAI,
     Ollama,
+    MCP,
 }

 impl Provider {
@@ -15,6 +22,7 @@ impl Provider {
         match s.to_lowercase().as_str() {
             "openai" => Some(Provider::OpenAI),
             "ollama" => Some(Provider::Ollama),
+            "mcp" => Some(Provider::MCP),
             _ => None,
         }
     }
@@ -23,13 +31,11 @@ impl Provider {
         match self {
             Provider::OpenAI => "openai",
             Provider::Ollama => "ollama",
+            Provider::MCP => "mcp",
         }
     }
 }
-
-use std::fs;
-use serde::Deserialize;

 #[derive(Deserialize)]
 struct OpenAIKey {
     token: String,
@@ -43,58 +49,92 @@ fn load_openai_api_key() -> Option<String> {
     Some(parsed.token)
 }

ask_chat was rewritten (String -> Option<String>, plus a native MCP branch over HTTP):

Before:
pub fn ask_chat(c: &Context, question: &str) -> String {
    let config = ConfigPaths::new();
    let base_dir = config.base_dir.join("mcp");
    let script_path = base_dir.join("scripts/ask.py");

    let python_path = if cfg!(target_os = "windows") {
        base_dir.join(".venv/Scripts/python.exe")
    } else {
        base_dir.join(".venv/bin/python")
    };

    let ollama_host = c.string_flag("host").ok();
    let ollama_model = c.string_flag("model").ok();
    let api_key = c.string_flag("api-key").ok()
        .or_else(|| load_openai_api_key());

    use crate::chat::Provider;

    let provider_str = c.string_flag("provider").unwrap_or_else(|_| "ollama".to_string());
    let provider = Provider::from_str(&provider_str).unwrap_or(Provider::Ollama);

    println!("🔍 使用プロバイダー: {}", provider.as_str());

    // build the command here
    let mut command = Command::new(python_path);
    command.arg(script_path).arg(question);

    // set environment variables
    command.env("PROVIDER", provider.as_str());

    if let Some(host) = ollama_host {
        command.env("OLLAMA_HOST", host);
    }
    if let Some(model) = ollama_model {
        command.env("OLLAMA_MODEL", model);
    }
    if let Some(api_key) = api_key {
        command.env("OPENAI_API_KEY", api_key);
    }

    let output = command
        .output()
        .expect("❌ MCPチャットスクリプトの実行に失敗しました");

    if output.status.success() {
        String::from_utf8_lossy(&output.stdout).to_string()
    } else {
        eprintln!(
            "❌ 実行エラー: {}\n{}",
            String::from_utf8_lossy(&output.stderr),
            String::from_utf8_lossy(&output.stdout),
        );
        String::from("エラーが発生しました。")
    }
}

After:
pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
    let config = ConfigPaths::new();
    let base_dir = config.base_dir.join("mcp");
    let user_path = config.base_dir.join("user.json");

    let mut user = load_user_data(&user_path);
    user.metrics = update_metrics_decay();

    // options
    let ollama_host = c.string_flag("host").ok();
    let ollama_model = c.string_flag("model").ok();

    let provider_str = c.string_flag("provider").unwrap_or_else(|_| "ollama".to_string());
    let provider = Provider::from_str(&provider_str).unwrap_or(Provider::Ollama);
    let api_key = c.string_flag("api-key").ok().or_else(load_openai_api_key);

    println!("🔍 使用プロバイダー: {}", provider.as_str());

    match provider {
        Provider::MCP => {
            let client = reqwest::blocking::Client::new();
            let url = std::env::var("MCP_URL").unwrap_or("http://127.0.0.1:5000/chat".to_string());
            let res = client.post(url)
                .json(&serde_json::json!({"message": question}))
                .send();

            match res {
                Ok(resp) => {
                    if resp.status().is_success() {
                        let json: serde_json::Value = resp.json().ok()?;
                        let text = json.get("response")?.as_str()?.to_string();
                        user.metrics.intimacy += 0.01;
                        user.metrics.last_updated = chrono::Utc::now();
                        save_user_data(&user_path, &user);
                        Some(text)
                    } else {
                        eprintln!("❌ MCPエラー: HTTP {}", resp.status());
                        None
                    }
                }
                Err(e) => {
                    eprintln!("❌ MCP接続失敗: {}", e);
                    None
                }
            }
        }
        _ => {
            // Python executable path
            let python_path = if cfg!(target_os = "windows") {
                base_dir.join(".venv/Scripts/mcp.exe")
            } else {
                base_dir.join(".venv/bin/mcp")
            };

            let mut command = Command::new(python_path);
            command.arg("ask").arg(question);

            if let Some(host) = ollama_host {
                command.env("OLLAMA_HOST", host);
            }
            if let Some(model) = ollama_model {
                command.env("OLLAMA_MODEL", model.clone());
                command.env("OPENAI_MODEL", model);
            }
            command.env("PROVIDER", provider.as_str());

            if let Some(key) = api_key {
                command.env("OPENAI_API_KEY", key);
            }

            let output = command.output().expect("❌ MCPチャットスクリプトの実行に失敗しました");

            if output.status.success() {
                let response = String::from_utf8_lossy(&output.stdout).to_string();
                user.metrics.intimacy += 0.01;
                user.metrics.last_updated = chrono::Utc::now();
                save_user_data(&user_path, &user);

                Some(response)
            } else {
                eprintln!(
                    "❌ 実行エラー: {}\n{}",
                    String::from_utf8_lossy(&output.stderr),
                    String::from_utf8_lossy(&output.stdout),
                );
                None
            }
        }
    }
}
(file header not captured; hunks from the mcp command module)
@@ -9,6 +9,8 @@ use crate::chat::ask_chat;
 use crate::git::{git_init, git_status};
 use crate::config::ConfigPaths;
 use crate::commands::git_repo::read_all_git_files;
+use crate::metrics::{load_user_data, save_user_data};
+use crate::memory::{log_message};

 pub fn mcp_setup() {
     let config = ConfigPaths::new();
@@ -30,8 +32,12 @@ pub fn mcp_setup() {
         "cli.py",
         "setup.py",
         "scripts/ask.py",
+        "scripts/server.py",
+        "scripts/config.py",
+        "scripts/summarize.py",
         "scripts/context_loader.py",
         "scripts/prompt_template.py",
+        "scripts/memory_store.py",
     ];

     for rel_path in files_to_copy {
@@ -74,6 +80,12 @@ pub fn mcp_setup() {
     let output = OtherCommand::new(&pip_path)
         .arg("install")
         .arg("openai")
+        .arg("requests")
+        .arg("fastmcp")
+        .arg("uvicorn")
+        .arg("fastapi")
+        .arg("fastapi_mcp")
+        .arg("mcp")
         .current_dir(&dest_dir)
         .output()
         .expect("pip install に失敗しました");
@@ -132,7 +144,7 @@ fn set_api_key_cmd() -> Command {
 fn chat_cmd() -> Command {
     Command::new("chat")
         .description("チャットで質問を送る")
-        .usage("mcp chat '質問内容' --host <OLLAMA_HOST> --model <MODEL> [--provider <ollama|openai>] [--api-key <KEY>]")
+        .usage("mcp chat '質問内容' --host <OLLAMA_HOST> --model <MODEL> [--provider <ollama|openai>] [--api-key <KEY>] [--repo <REPO_URL>]")
         .flag(
             Flag::new("host", FlagType::String)
                 .description("OLLAMAホストのURL")
@@ -159,48 +171,65 @@ fn chat_cmd() -> Command {
                 .alias("r"),
         )
The old version's two .action closures (the second silently shadowed the first) were merged into one:

Before:
        .action(|c: &Context| {
            if let Some(question) = c.args.get(0) {
                let response = ask_chat(c, question);
                println!("💬 応答:\n{}", response);
            } else {
                eprintln!("❗ 質問が必要です: mcp chat 'こんにちは'");
            }
        })
        .action(|c: &Context| {
            let config = ConfigPaths::new();
            if let Ok(repo_url) = c.string_flag("repo") {
                let repo_base = config.base_dir.join("repos");
                let repo_dir = repo_base.join(sanitize_repo_name(&repo_url));

                if !repo_dir.exists() {
                    println!("📥 Gitリポジトリをクローン中: {}", repo_url);
                    let status = OtherCommand::new("git")
                        .args(&["clone", &repo_url, repo_dir.to_str().unwrap()])
                        .status()
                        .expect("❌ Gitのクローンに失敗しました");
                    assert!(status.success(), "Git clone エラー");
                } else {
                    println!("✔ リポジトリはすでに存在します: {}", repo_dir.display());
                }

                //let files = read_all_git_files(&repo_dir);
                let files = read_all_git_files(repo_dir.to_str().unwrap());
                let prompt = format!(
                    "以下のコードベースを読み込んで、改善案や次のステップを提案してください:\n{}",
                    files
                );

                let response = ask_chat(c, &prompt);
                println!("💡 提案:\n{}", response);
            } else {
                if let Some(question) = c.args.get(0) {
                    let response = ask_chat(c, question);
                    println!("💬 {}", response);
                } else {
                    eprintln!("❗ 質問が必要です: mcp chat 'こんにちは'");
                }
            }
        })
}

After:
        .action(|c: &Context| {
            let config = ConfigPaths::new();
            let user_path = config.data_file("json");
            let mut user = load_user_data(&user_path);
            // with --repo, run in code-base reading mode
            if let Ok(repo_url) = c.string_flag("repo") {
                let repo_base = config.base_dir.join("repos");
                let repo_dir = repo_base.join(sanitize_repo_name(&repo_url));

                if !repo_dir.exists() {
                    println!("📥 Gitリポジトリをクローン中: {}", repo_url);
                    let status = OtherCommand::new("git")
                        .args(&["clone", &repo_url, repo_dir.to_str().unwrap()])
                        .status()
                        .expect("❌ Gitのクローンに失敗しました");
                    assert!(status.success(), "Git clone エラー");
                } else {
                    println!("✔ リポジトリはすでに存在します: {}", repo_dir.display());
                }

                let files = read_all_git_files(repo_dir.to_str().unwrap());
                let prompt = format!(
                    "以下のコードベースを読み込んで、改善案や次のステップを提案してください:\n{}",
                    files
                );

                if let Some(response) = ask_chat(c, &prompt) {
                    println!("💬 提案:\n{}", response);
                } else {
                    eprintln!("❗ 提案が取得できませんでした");
                }

                return;
            }

            // normal chat (no --repo)
            match c.args.get(0) {
                Some(question) => {
                    log_message(&config.base_dir, "user", question);
                    let response = ask_chat(c, question);

                    if let Some(ref text) = response {
                        println!("💬 応答:\n{}", text);
                        // adjust trust based on the response content (simple sentiment check)
                        if text.contains("thank") || text.contains("great") {
                            user.metrics.trust += 0.05;
                        } else if text.contains("hate") || text.contains("bad") {
                            user.metrics.trust -= 0.05;
                        }
                        log_message(&config.base_dir, "ai", &text);
                        save_user_data(&user_path, &user);
                    } else {
                        eprintln!("❗ 応答が取得できませんでした");
                    }
                }
                None => {
                    eprintln!("❗ 質問が必要です: mcp chat 'こんにちは'");
                }
            }
        })
}

fn init_cmd() -> Command {
src/commands/scheduler.rs (rewritten)
@@ -1,29 +1,127 @@
Before:
// src/commands/scheduler.rs

use seahorse::{Command, Context};
use std::thread;
use std::time::Duration;
use chrono::Local;

pub fn scheduler_cmd() -> Command {
    Command::new("scheduler")
        .usage("scheduler [interval_sec]")
        .alias("s")
        .action(|c: &Context| {
            let interval = c.args.get(0)
                .and_then(|s| s.parse::<u64>().ok())
                .unwrap_or(60); // default: every 60 seconds

            println!("⏳ スケジューラー開始({interval}秒ごと)...");

            loop {
                let now = Local::now();
                println!("🔁 タスク実行中: {}", now.format("%Y-%m-%d %H:%M:%S"));

                // call the internals of talk_cmd / save_cmd here,
                // e.g. load_config -> update the AI -> print

                thread::sleep(Duration::from_secs(interval));
            }
        })
}

After:
// src/commands/scheduler.rs

use seahorse::{Command, Context};
use std::thread;
use std::time::Duration;
use chrono::{Local, Utc, Timelike};
use crate::metrics::{load_user_data, save_user_data};
use crate::config::ConfigPaths;
use crate::chat::ask_chat;
use rand::prelude::*;
use rand::rng;

fn send_scheduled_message() {
    let config = ConfigPaths::new();
    let user_path = config.data_file("json");
    let mut user = load_user_data(&user_path);

    if !user.metrics.can_send {
        println!("🚫 送信条件を満たしていないため、スケジュール送信スキップ");
        return;
    }

    // date comparison (once-per-day limit)
    let today = Local::now().format("%Y-%m-%d").to_string();
    if let Some(last_date) = &user.messaging.last_sent_date {
        if last_date != &today {
            user.messaging.sent_today = false;
        }
    } else {
        user.messaging.sent_today = false;
    }

    if user.messaging.sent_today {
        println!("🔁 本日はすでに送信済みです: {}", today);
        return;
    }

    if let Some(schedule_str) = &user.messaging.schedule_time {
        let now = Local::now();
        let target: Vec<&str> = schedule_str.split(':').collect();

        if target.len() != 2 {
            println!("⚠️ schedule_time形式が無効です: {}", schedule_str);
            return;
        }

        let (sh, sm) = (target[0].parse::<u32>(), target[1].parse::<u32>());
        if let (Ok(sh), Ok(sm)) = (sh, sm) {
            if now.hour() == sh && now.minute() == sm {
                if let Some(msg) = user.messaging.templates.choose(&mut rng()) {
                    println!("💬 自動送信メッセージ: {}", msg);
                    let dummy_context = Context::new(vec![], None, "".to_string());
                    ask_chat(&dummy_context, msg);
                    user.metrics.intimacy += 0.03;

                    // update the sent flags
                    user.messaging.sent_today = true;
                    user.messaging.last_sent_date = Some(today);

                    save_user_data(&user_path, &user);
                }
            }
        }
    }
}

pub fn scheduler_cmd() -> Command {
    Command::new("scheduler")
        .usage("scheduler [interval_sec]")
        .alias("s")
        .description("定期的に送信条件をチェックし、自発的なメッセージ送信を試みる")
        .action(|c: &Context| {
            let interval = c.args.get(0)
                .and_then(|s| s.parse::<u64>().ok())
                .unwrap_or(3600); // default: one hour (easier to test)

            println!("⏳ スケジューラー開始({}秒ごと)...", interval);

            loop {
                let config = ConfigPaths::new();
                let user_path = config.data_file("json");
                let mut user = load_user_data(&user_path);

                let now = Utc::now();
                let elapsed = now.signed_duration_since(user.metrics.last_updated);
                let hours = elapsed.num_minutes() as f32 / 60.0;

                let speed_factor = if hours > 48.0 {
                    2.0
                } else if hours > 24.0 {
                    1.5
                } else {
                    1.0
                };

                user.metrics.trust = (user.metrics.trust - 0.01 * speed_factor).clamp(0.0, 1.0);
                user.metrics.intimacy = (user.metrics.intimacy - 0.01 * speed_factor).clamp(0.0, 1.0);
                user.metrics.energy = (user.metrics.energy - 0.01 * speed_factor).clamp(0.0, 1.0);

                user.metrics.can_send =
                    user.metrics.trust >= 0.5 &&
                    user.metrics.intimacy >= 0.5 &&
                    user.metrics.energy >= 0.5;

                user.metrics.last_updated = now;

                if user.metrics.can_send {
                    println!("💡 AIメッセージ送信条件を満たしています(信頼:{:.2}, 親密:{:.2}, エネルギー:{:.2})",
                        user.metrics.trust,
                        user.metrics.intimacy,
                        user.metrics.energy
                    );
                    send_scheduled_message();
                } else {
                    println!("🤫 条件未達成のため送信スキップ: trust={:.2}, intimacy={:.2}, energy={:.2}",
                        user.metrics.trust,
                        user.metrics.intimacy,
                        user.metrics.energy
                    );
                }

                save_user_data(&user_path, &user);
                thread::sleep(Duration::from_secs(interval));
            }
        })
}
src/main.rs
@@ -8,6 +8,8 @@ mod commands;
 mod config;
 mod git;
 mod chat;
+mod metrics;
+mod memory;

 use cli::cli_app;
 use seahorse::App;
src/memory.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
// src/memory.rs
use chrono::{DateTime, Local, Utc};
use serde::{Deserialize, Serialize};
use std::fs::{self};
//use std::fs::{self, OpenOptions};
use std::io::{BufReader, BufWriter};
use std::path::PathBuf;
use std::{fs::File};
//use std::{env, fs::File};

#[derive(Debug, Serialize, Deserialize)]
pub struct MemoryEntry {
    pub timestamp: DateTime<Utc>,
    pub sender: String,
    pub message: String,
}

pub fn log_message(base_dir: &PathBuf, sender: &str, message: &str) {
    let now_utc = Utc::now();
    let date_str = Local::now().format("%Y-%m-%d").to_string();
    let mut file_path = base_dir.clone();
    file_path.push("memory");
    let _ = fs::create_dir_all(&file_path);
    file_path.push(format!("{}.json", date_str));

    let new_entry = MemoryEntry {
        timestamp: now_utc,
        sender: sender.to_string(),
        message: message.to_string(),
    };

    let mut entries = if file_path.exists() {
        let file = File::open(&file_path).expect("💥 メモリファイルの読み込み失敗");
        let reader = BufReader::new(file);
        serde_json::from_reader(reader).unwrap_or_else(|_| vec![])
    } else {
        vec![]
    };

    entries.push(new_entry);

    let file = File::create(&file_path).expect("💥 メモリファイルの書き込み失敗");
    let writer = BufWriter::new(file);
    serde_json::to_writer_pretty(writer, &entries).expect("💥 JSONの書き込み失敗");
}

// usage (inside ask_chat):
// log_message(&config.base_dir, "user", question);
// log_message(&config.base_dir, "ai", &response);
src/metrics.rs (new file, 147 lines)
@@ -0,0 +1,147 @@
// src/metrics.rs
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;

use crate::config::ConfigPaths;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
    pub trust: f32,
    pub intimacy: f32,
    pub energy: f32,
    pub can_send: bool,
    pub last_updated: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Personality {
    pub kind: String,
    pub strength: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relationship {
    pub trust: f32,
    pub intimacy: f32,
    pub curiosity: f32,
    pub threshold: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Environment {
    pub luck_today: f32,
    pub luck_history: Vec<f32>,
    pub level: i32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Messaging {
    pub enabled: bool,
    pub schedule_time: Option<String>,
    pub decay_rate: f32,
    pub templates: Vec<String>,
    pub sent_today: bool,               // added
    pub last_sent_date: Option<String>, // added
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
    pub recent_messages: Vec<String>,
    pub long_term_notes: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserData {
    pub personality: Personality,
    pub relationship: Relationship,
    pub environment: Environment,
    pub messaging: Messaging,
    pub last_interaction: DateTime<Utc>,
    pub memory: Memory,
    pub metrics: Metrics,
}

impl Metrics {
    pub fn decay(&mut self) {
        let now = Utc::now();
        let hours = (now - self.last_updated).num_minutes() as f32 / 60.0;
        self.trust = decay_param(self.trust, hours);
        self.intimacy = decay_param(self.intimacy, hours);
        self.energy = decay_param(self.energy, hours);
        self.can_send = self.trust >= 0.5 && self.intimacy >= 0.5 && self.energy >= 0.5;
        self.last_updated = now;
    }
}

pub fn load_user_data(path: &Path) -> UserData {
    let config = ConfigPaths::new();
    let example_path = Path::new("example.json");
    config.ensure_file_exists("json", example_path);

    if !path.exists() {
        return UserData {
            personality: Personality {
                kind: "positive".into(),
                strength: 0.8,
            },
            relationship: Relationship {
                trust: 0.2,
                intimacy: 0.6,
                curiosity: 0.5,
                threshold: 1.5,
            },
            environment: Environment {
                luck_today: 0.9,
                luck_history: vec![0.9, 0.9, 0.9],
                level: 1,
            },
            messaging: Messaging {
                enabled: true,
                schedule_time: Some("08:00".to_string()),
                decay_rate: 0.1,
                templates: vec![
                    "おはよう!今日もがんばろう!".to_string(),
                    "ねえ、話したいことがあるの。".to_string(),
                ],
                sent_today: false,
                last_sent_date: None,
            },
            last_interaction: Utc::now(),
            memory: Memory {
                recent_messages: vec![],
                long_term_notes: vec![],
            },
            metrics: Metrics {
                trust: 0.5,
                intimacy: 0.5,
                energy: 0.5,
                can_send: true,
                last_updated: Utc::now(),
            },
        };
    }

    let content = fs::read_to_string(path).expect("user.json の読み込みに失敗しました");
    serde_json::from_str(&content).expect("user.json のパースに失敗しました")
}

pub fn save_user_data(path: &Path, data: &UserData) {
    let content = serde_json::to_string_pretty(data).expect("user.json のシリアライズ失敗");
    fs::write(path, content).expect("user.json の書き込みに失敗しました");
}

pub fn update_metrics_decay() -> Metrics {
    let config = ConfigPaths::new();
    let path = config.base_dir.join("user.json");
    let mut data = load_user_data(&path);
    data.metrics.decay();
    save_user_data(&path, &data);
    data.metrics
}

fn decay_param(value: f32, hours: f32) -> f32 {
    let decay_rate = 0.05;
    (value * (1.0f32 - decay_rate).powf(hours)).clamp(0.0, 1.0)
}
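The decay curve above is a plain exponential, value × 0.95^hours clamped to [0, 1]. A quick numeric sketch mirroring decay_param in Python, to show how fast a metric falls from 1.0:

# numeric sketch of decay_param: value * (1 - 0.05) ** hours, clamped to [0, 1]
def decay_param(value: float, hours: float) -> float:
    return min(max(value * (1.0 - 0.05) ** hours, 0.0), 1.0)

for hours in (1, 6, 12, 24, 48):
    print(hours, round(decay_param(1.0, hours), 3))
# 1  -> 0.95
# 6  -> 0.735
# 12 -> 0.54
# 24 -> 0.292
# 48 -> 0.085

Note that from the 0.5 defaults, a single hour of this decay already drops a metric below the 0.5 can_send threshold (0.5 × 0.95 = 0.475), so sending switches off quickly unless interactions keep bumping the metrics back up.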
(file header not captured; deleted Rust file, 42 lines)
@@ -1,42 +0,0 @@
use std::env;
use std::process::{Command, Stdio};
use std::io::{self, Write};

fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        eprintln!("Usage: langchain_cli <prompt>");
        std::process::exit(1);
    }

    let prompt = &args[1];

    // Simulate a pipeline stage: e.g., tokenization, reasoning, response generation
    let stages = vec!["Tokenize", "Reason", "Generate"];

    for stage in &stages {
        println!("[Stage: {}] Processing...", stage);
    }

    // Example call to Python-based LangChain (assuming you have a script or API to call)
    // For placeholder purposes, we echo the prompt back.
    let output = Command::new("python3")
        .arg("-c")
        .arg(format!("print(\"LangChain Agent Response for: {}\")", prompt))
        .stdout(Stdio::piped())
        .spawn()
        .expect("failed to execute process")
        .wait_with_output()
        .expect("failed to wait on child");

    io::stdout().write_all(&output.stdout).unwrap();
}

/*
TODO (for future LangChain-style pipeline):
1. Implement trait-based agent components: Tokenizer, Retriever, Reasoner, Generator.
2. Allow config via YAML or TOML to define chain flow.
3. Async pipeline support with Tokio.
4. Optional integration with LLM APIs (OpenAI, Ollama, etc).
5. Rust-native vector search (e.g. using `tantivy`, `qdrant-client`).
*/
tmp/data.rs (deleted, 133 lines)
@@ -1,133 +0,0 @@
#[derive(Debug, Serialize, Deserialize)]
pub struct RelationalAutonomousAI {
    pub system_name: String,
    pub description: String,
    pub core_components: CoreComponents,
    pub extensions: Extensions,
    pub note: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CoreComponents {
    pub personality: Personality,
    pub relationship: Relationship,
    pub environment: Environment,
    pub memory: Memory,
    pub message_trigger: MessageTrigger,
    pub message_generation: MessageGeneration,
    pub state_transition: StateTransition,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Personality {
    pub r#type: String,
    pub variants: Vec<String>,
    pub parameters: PersonalityParameters,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct PersonalityParameters {
    pub message_trigger_style: String,
    pub decay_rate_modifier: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Relationship {
    pub parameters: Vec<String>,
    pub properties: RelationshipProperties,
    pub decay_function: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct RelationshipProperties {
    pub persistent: bool,
    pub hidden: bool,
    pub irreversible: bool,
    pub decay_over_time: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Environment {
    pub daily_luck: DailyLuck,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct DailyLuck {
    pub r#type: String,
    pub range: Vec<f32>,
    pub update: String,
    pub streak_mechanism: StreakMechanism,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct StreakMechanism {
    pub trigger: String,
    pub effect: String,
    pub chance: f32,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Memory {
    pub long_term_memory: String,
    pub short_term_context: String,
    pub usage_in_generation: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct MessageTrigger {
    pub condition: TriggerCondition,
    pub timing: TriggerTiming,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct TriggerCondition {
    pub relationship_threshold: String,
    pub time_decay: bool,
    pub environment_luck: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct TriggerTiming {
    pub based_on: Vec<String>,
    pub modifiers: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct MessageGeneration {
    pub style_variants: Vec<String>,
    pub influenced_by: Vec<String>,
    pub llm_integration: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct StateTransition {
    pub states: Vec<String>,
    pub transitions: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Extensions {
    pub persistence: Persistence,
    pub api: Api,
    pub scheduler: Scheduler,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Persistence {
    pub database: String,
    pub storage_items: Vec<String>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Api {
    pub llm: String,
    pub mode: String,
    pub external_event_trigger: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Scheduler {
    pub async_event_loop: bool,
    pub interval_check: i32,
    pub time_decay_check: bool,
}
File diff suppressed because one or more lines are too long
(file header not captured; deleted Rust file, 46 lines)
@@ -1,46 +0,0 @@
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::{BufReader, Write};
use std::time::{SystemTime, UNIX_EPOCH};

mod model;
use model::RelationalAutonomousAI;

fn load_config(path: &str) -> std::io::Result<RelationalAutonomousAI> {
    let file = File::open(path)?;
    let reader = BufReader::new(file);
    let config: RelationalAutonomousAI = serde_json::from_reader(reader)?;
    Ok(config)
}

fn save_config(config: &RelationalAutonomousAI, path: &str) -> std::io::Result<()> {
    let mut file = File::create(path)?;
    let json = serde_json::to_string_pretty(config)?;
    file.write_all(json.as_bytes())?;
    Ok(())
}

fn should_send_message(config: &RelationalAutonomousAI) -> bool {
    // simple send condition: relationship includes "trust" and daily_luck upper bound >= 0.8
    config.core_components.relationship.parameters.contains(&"trust".to_string())
        && config.core_components.environment.daily_luck.range[1] >= 0.8
}

fn main() -> std::io::Result<()> {
    let path = "config.json";

    let mut config = load_config(path)?;

    if should_send_message(&config) {
        println!("💌 メッセージを送信できます: {:?}", config.core_components.personality.r#type);

        // example state change: record a transition after sending a message
        config.core_components.state_transition.transitions.push("message_sent".to_string());

        save_config(&config, path)?;
    } else {
        println!("😶 まだ送信条件に達していません。");
    }

    Ok(())
}