# server.py
from fastapi import FastAPI
from fastapi_mcp import FastApiMCP
from pydantic import BaseModel

from memory_store import save_message, load_messages

app = FastAPI()
mcp = FastApiMCP(app, name="aigpt-agent", description="MCP Server for AI memory")

# --- Model definitions ---
class ChatInput(BaseModel):
    message: str

class MemoryInput(BaseModel):
    sender: str
    message: str

# --- Tool (endpoint) definitions ---
@app.post("/chat", operation_id="chat")
async def chat(input: ChatInput):
    # Store the user's message, build a reply, and store the reply as well.
    save_message("user", input.message)
    response = f"AI: received '{input.message}'!"
    save_message("ai", response)
    return {"response": response}

@app.post("/memory", operation_id="save_memory")
async def memory_post(input: MemoryInput):
    # Persist a single message from an arbitrary sender.
    save_message(input.sender, input.message)
    return {"status": "saved"}

@app.get("/memory", operation_id="get_memory")
async def memory_get():
    # Return the full stored conversation history.
    return {"messages": load_messages()}

# --- MCP initialization ---
mcp.mount()

if __name__ == "__main__":
    import uvicorn

    print("🚀 Starting MCP server...")
    uvicorn.run(app, host="127.0.0.1", port=5000)
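The server above imports save_message and load_messages from a memory_store module that is not shown in this listing. As a rough idea of what such a module could look like, here is a minimal sketch that keeps the history in a local JSON file; the file name, record schema, and storage backend are assumptions, not part of the original.

# memory_store.py (hypothetical sketch, not part of the original)
import json
from pathlib import Path

# Assumed storage location; the real module may use a different path or backend.
MEMORY_FILE = Path("memory.json")

def load_messages() -> list[dict]:
    # Return the stored message history, or an empty list if nothing is saved yet.
    if MEMORY_FILE.exists():
        return json.loads(MEMORY_FILE.read_text(encoding="utf-8"))
    return []

def save_message(sender: str, message: str) -> None:
    # Append one message to the history and write the whole list back to disk.
    messages = load_messages()
    messages.append({"sender": sender, "message": message})
    MEMORY_FILE.write_text(json.dumps(messages, ensure_ascii=False, indent=2), encoding="utf-8")

Once the server is running (python server.py), the endpoints defined above can be exercised directly, for example:

curl -X POST http://127.0.0.1:5000/chat -H "Content-Type: application/json" -d '{"message": "hello"}'
curl http://127.0.0.1:5000/memory

These requests match the routes, request models, and port configured in server.py.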