syui 2025-05-22 21:30:47 +09:00
parent 9cbf5da3fd
commit f97da41595
Signed by: syui
GPG Key ID: 5417CFEBAD92DF56
9 changed files with 168 additions and 57 deletions

.gitignore vendored

@@ -3,3 +3,4 @@
 output.json
 config/*.db
 aigpt
+mcp/scripts/__*


@@ -33,7 +33,7 @@ $ ./aigpt mcp chat "hello world!" --host http://localhost:11434 --model syui/ai
 ---
 # openai api
-$ ./aigpt mcp set-api -api sk-abc123
+$ ./aigpt mcp set-api --api sk-abc123
 $ ./aigpt mcp chat "こんにちは" -p openai -m gpt-4o-mini
 ---


@@ -1,3 +1,28 @@
 # cli.py
+import sys
+import subprocess
+from pathlib import Path
+
+SCRIPT_DIR = Path.home() / ".config" / "aigpt" / "mcp" / "scripts"
+
+def run_script(name):
+    script_path = SCRIPT_DIR / f"{name}.py"
+    if not script_path.exists():
+        print(f"❌ スクリプトが見つかりません: {script_path}")
+        sys.exit(1)
+    args = sys.argv[2:]  # ← take the arguments that follow "ask"
+    result = subprocess.run(["python", str(script_path)] + args, capture_output=True, text=True)
+    print(result.stdout)
+    if result.stderr:
+        print(result.stderr)
+
 def main():
     print("Hello MCP!")
+    if len(sys.argv) < 2:
+        print("Usage: mcp <script>")
+        return
+
+    command = sys.argv[1]
+    if command in {"summarize", "ask", "setup"}:
+        run_script(command)
+    else:
+        print(f"❓ 未知のコマンド: {command}")


@@ -1,55 +1,28 @@
-import os
-import json
-import httpx
-import openai
-from context_loader import load_context_from_repo
-from prompt_template import PROMPT_TEMPLATE
-
-PROVIDER = os.getenv("PROVIDER", "ollama")  # "ollama" or "openai"
-
-# For Ollama
-OLLAMA_HOST = os.getenv("OLLAMA_HOST", "http://localhost:11434")
-OLLAMA_URL = f"{OLLAMA_HOST}/api/generate"
-OLLAMA_MODEL = os.getenv("MODEL", "syui/ai")
-
-# For OpenAI
-OPENAI_BASE = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
-OPENAI_KEY = os.getenv("OPENAI_API_KEY", "")
-OPENAI_MODEL = os.getenv("MODEL", "gpt-4o-mini")
-
-def ask_question(question, repo_path="."):
-    context = load_context_from_repo(repo_path)
-    prompt = PROMPT_TEMPLATE.format(context=context[:10000], question=question)
-
-    if PROVIDER == "ollama":
-        payload = {
-            "model": OLLAMA_MODEL,
-            "prompt": prompt,
-            "stream": False
-        }
-        response = httpx.post(OLLAMA_URL, json=payload, timeout=60.0)
-        result = response.json()
-        return result.get("response", "返答がありませんでした。")
-    elif PROVIDER == "openai":
-        import openai
-        openai.api_key = OPENAI_KEY
-        openai.api_base = OPENAI_BASE
-        client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-        response = client.chat.completions.create(
-            model=OPENAI_MODEL,
-            messages=[{"role": "user", "content": prompt}]
-        )
-        return response.choices[0].message.content
+## scripts/ask.py
+import sys
+import requests
+from config import load_config
+
+def ask(prompt):
+    cfg = load_config()
+    if cfg["provider"] == "ollama":
+        payload = {"model": cfg["model"], "prompt": prompt, "stream": False}
+        response = requests.post(cfg["url"], json=payload)
+        print(response.json().get("response", "❌ No response"))
     else:
-        return f"❌ 未知のプロバイダです: {PROVIDER}"
+        headers = {
+            "Authorization": f"Bearer {cfg['api_key']}",
+            "Content-Type": "application/json"
+        }
+        payload = {
+            "model": cfg["model"],
+            "messages": [{"role": "user", "content": prompt}]
+        }
+        response = requests.post(cfg["url"], headers=headers, json=payload)
+        print(response.json().get("choices", [{}])[0].get("message", {}).get("content", "❌ No content"))
 
 if __name__ == "__main__":
-    import sys
-    question = " ".join(sys.argv[1:])
-    answer = ask_question(question)
-    print("\n🧠 回答:\n", answer)
+    if len(sys.argv) < 2:
+        print("Usage: python ask.py 'your message'")
+        sys.exit(1)
+    ask(sys.argv[1])
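
For reference, a minimal sketch of driving the new ask() helper directly from Python, assuming ask.py and config.py are on the import path and an Ollama server is reachable at the default host; the environment variables are the ones config.py reads:

import os

os.environ.setdefault("PROVIDER", "ollama")   # or "openai", with OPENAI_API_KEY set
os.environ.setdefault("OLLAMA_HOST", "http://localhost:11434")

from ask import ask
ask("hello world!")   # prints the model's reply, or "❌ No response"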

mcp/scripts/config.py Normal file

@@ -0,0 +1,34 @@
# scripts/config.py
import os
from pathlib import Path

def load_config():
    provider = os.getenv("PROVIDER", "ollama")
    model = os.getenv("MODEL", "syui/ai" if provider == "ollama" else "gpt-4o-mini")
    api_key = os.getenv("OPENAI_API_KEY", "")
    if provider == "ollama":
        return {
            "provider": "ollama",
            "model": model,
            "url": f"{os.getenv('OLLAMA_HOST', 'http://localhost:11434')}/api/generate"
        }
    elif provider == "openai":
        return {
            "provider": "openai",
            "model": model,
            "api_key": api_key,
            "url": f"{os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1')}/chat/completions"
        }
    else:
        raise ValueError(f"Unsupported provider: {provider}")

# Directory settings
BASE_DIR = Path.home() / ".config" / "aigpt"
MEMORY_DIR = BASE_DIR / "memory"
SUMMARY_DIR = MEMORY_DIR / "summary"

# Initialize (create if needed)
BASE_DIR.mkdir(parents=True, exist_ok=True)
MEMORY_DIR.mkdir(parents=True, exist_ok=True)
SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
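
A small sketch of what load_config() resolves to under each provider, assuming config.py is importable; the values in the comments follow directly from the defaults above:

import os
from config import load_config

os.environ["PROVIDER"] = "ollama"
print(load_config())
# {'provider': 'ollama', 'model': 'syui/ai', 'url': 'http://localhost:11434/api/generate'}

os.environ["PROVIDER"] = "openai"
os.environ["OPENAI_API_KEY"] = "sk-abc123"   # placeholder key from the README example
print(load_config())
# {'provider': 'openai', 'model': 'gpt-4o-mini', 'api_key': 'sk-abc123',
#  'url': 'https://api.openai.com/v1/chat/completions'}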

mcp/scripts/summarize.py Normal file

@@ -0,0 +1,76 @@
# scripts/summarize.py
import json
from datetime import datetime
from config import MEMORY_DIR, SUMMARY_DIR, load_config
import requests

def load_memory(date_str):
    path = MEMORY_DIR / f"{date_str}.json"
    if not path.exists():
        print(f"⚠️ メモリファイルが見つかりません: {path}")
        return None
    with open(path, "r") as f:
        return json.load(f)

def save_summary(date_str, content):
    SUMMARY_DIR.mkdir(parents=True, exist_ok=True)
    path = SUMMARY_DIR / f"{date_str}_summary.json"
    with open(path, "w") as f:
        json.dump(content, f, indent=2, ensure_ascii=False)
    print(f"✅ 要約を保存しました: {path}")

def build_prompt(logs):
    messages = [
        {"role": "system", "content": "あなたは要約AIです。以下の会話ログを要約してください。"},
        {"role": "user", "content": "\n".join(f"{entry['sender']}: {entry['message']}" for entry in logs)}
    ]
    return messages

def summarize_with_llm(messages):
    cfg = load_config()
    if cfg["provider"] == "openai":
        headers = {
            "Authorization": f"Bearer {cfg['api_key']}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": cfg["model"],
            "messages": messages,
            "temperature": 0.7
        }
        response = requests.post(cfg["url"], headers=headers, json=payload)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    elif cfg["provider"] == "ollama":
        payload = {
            "model": cfg["model"],
            "prompt": "\n".join(m["content"] for m in messages),
            "stream": False,
        }
        response = requests.post(cfg["url"], json=payload)
        response.raise_for_status()
        return response.json()["response"]
    else:
        raise ValueError(f"Unsupported provider: {cfg['provider']}")

def main():
    date_str = datetime.now().strftime("%Y-%m-%d")
    logs = load_memory(date_str)
    if not logs:
        return

    prompt_messages = build_prompt(logs)
    summary_text = summarize_with_llm(prompt_messages)

    summary = {
        "date": date_str,
        "summary": summary_text,
        "total_messages": len(logs)
    }

    save_summary(date_str, summary)

if __name__ == "__main__":
    main()
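
summarize.py expects a per-day memory log at ~/.config/aigpt/memory/YYYY-MM-DD.json; the writer of that file is not part of this commit, but build_prompt() shows each entry needs sender and message fields. A minimal sketch that seeds today's log for testing, with the sample messages made up:

import json
from datetime import datetime
from config import MEMORY_DIR

# Hypothetical sample entries; only the field names are taken from build_prompt().
sample = [
    {"sender": "user", "message": "hello"},
    {"sender": "ai", "message": "hi, how can I help?"},
]
path = MEMORY_DIR / f"{datetime.now().strftime('%Y-%m-%d')}.json"
with open(path, "w") as f:
    json.dump(sample, f, ensure_ascii=False, indent=2)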


@@ -1,3 +1,4 @@
+# setup.py
 from setuptools import setup

 setup(
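
The setup() arguments are cut off in this diff, but the Rust side now invokes .venv/bin/mcp (see the chat changes below), which implies a console-script entry point mapping to cli.main. A hypothetical sketch of what such a declaration could look like; the package name and module layout are assumptions, not taken from this commit:

# Hypothetical: names and layout assumed, not part of this diff.
from setuptools import setup

setup(
    name="mcp",
    py_modules=["cli"],
    entry_points={
        "console_scripts": ["mcp = cli:main"],
    },
)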


@@ -45,7 +45,6 @@ fn load_openai_api_key() -> Option<String> {
 pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
     let config = ConfigPaths::new();
     let base_dir = config.base_dir.join("mcp");
-    let script_path = base_dir.join("scripts/ask.py");
     let user_path = config.base_dir.join("user.json");

     let mut user = load_user_data(&user_path);
@@ -53,9 +52,9 @@ pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
     // Python executable path
     let python_path = if cfg!(target_os = "windows") {
-        base_dir.join(".venv/Scripts/python.exe")
+        base_dir.join(".venv/Scripts/mcp.exe")
     } else {
-        base_dir.join(".venv/bin/python")
+        base_dir.join(".venv/bin/mcp")
     };

     // Various options
@@ -69,7 +68,7 @@ pub fn ask_chat(c: &Context, question: &str) -> Option<String> {
     // Prepare the Python command
     let mut command = Command::new(python_path);
-    command.arg(script_path).arg(question);
+    command.arg("ask").arg(question);

     if let Some(host) = ollama_host {
         command.env("OLLAMA_HOST", host);


@@ -32,6 +32,7 @@ pub fn mcp_setup() {
         "cli.py",
         "setup.py",
         "scripts/ask.py",
+        "scripts/summarize.py",
         "scripts/context_loader.py",
         "scripts/prompt_template.py",
     ];
@@ -76,6 +77,7 @@ pub fn mcp_setup() {
     let output = OtherCommand::new(&pip_path)
         .arg("install")
         .arg("openai")
+        .arg("requests")
         .current_dir(&dest_dir)
         .output()
         .expect("pip install に失敗しました");