shell/example/custom_llm_mcp_server.py

# custom_llm_mcp_server.py
import requests
from mcp.server.fastmcp import FastMCP

app = FastMCP("local-llm-mcp")

@app.tool()
async def run_local_llm(prompt: str, model: str = "qwen2.5-coder:14b") -> str:
    """Run code generation or analysis on a local LLM."""
    # Call Ollama's generate endpoint on its default local port (non-streaming).
    # Generation can be slow, so allow a generous timeout; adjust as needed.
    response = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": model, "prompt": prompt, "stream": False},
        timeout=300,
    )
    response.raise_for_status()
    return response.json()["response"]

@app.tool()
async def execute_code(code: str, language: str = "python") -> str:
    """Execute generated code."""
    # Run the code in a secure execution environment.
    # A Docker container or sandbox environment is recommended.
    # One possible shape for such a runner is sketched below.
    raise NotImplementedError("execute_code needs a sandboxed runner")
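
# A minimal sketch of what a runner behind execute_code could look like.
# This is an illustration, not part of the original file: the helper name
# (_run_sandboxed) and the "fresh interpreter in a subprocess with a timeout"
# strategy are assumptions. A subprocess is NOT a real sandbox; a Docker
# container or dedicated sandbox remains the safer choice for untrusted code.
import os
import subprocess
import tempfile

def _run_sandboxed(code: str, timeout: int = 10) -> str:
    """Write the code to a temp file and run it in a separate interpreter."""
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(code)
        path = f.name
    try:
        result = subprocess.run(
            ["python3", path],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        return result.stdout + result.stderr
    finally:
        os.unlink(path)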

if __name__ == "__main__":
    # FastMCP defaults to the stdio transport, so a plain run() is enough.
    app.run()
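
# Usage note (an assumption about how this server would be wired up): an MCP
# host such as Claude Desktop can launch it over stdio from its config file,
# along the lines of
#   "mcpServers": {
#     "local-llm-mcp": {
#       "command": "python",
#       "args": ["custom_llm_mcp_server.py"]
#     }
#   }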