#!/usr/bin/env python3
"""
ai.shell MCP Server

A Model Context Protocol server for the ai.shell CLI.
"""

import asyncio
import json
import logging
from typing import Dict, Any, Optional
from pathlib import Path
import sys
import os

sys.path.append(str(Path(__file__).parent / "docs"))

from aiohttp import web
import aiohttp_cors

# requests is used for the HTTP calls to the Ollama API (see LocalLLMServer below)
import requests


class LocalLLMServer:
    """Simplified LocalLLMServer for ai.shell"""

    def __init__(self, model: str = "qwen2.5-coder:7b"):
        self.model = model
        self.ollama_url = "http://localhost:11434"

    async def code_with_local_llm(self, prompt: str, language: str = "python") -> Dict[str, Any]:
        """Generate code using the local LLM"""
        system_prompt = f"You are an expert {language} programmer. Generate clean, well-commented code."

        try:
            # requests is synchronous, so run it in a worker thread to avoid
            # blocking the aiohttp event loop for up to `timeout` seconds
            response = await asyncio.to_thread(
                requests.post,
                f"{self.ollama_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": f"{system_prompt}\n\nUser: {prompt}\n\nPlease provide the code:",
                    "stream": False,
                    "options": {
                        "temperature": 0.1,
                        "top_p": 0.95,
                    }
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                code = result.get("response", "")
                return {"code": code, "language": language}
            else:
                return {"error": f"Ollama returned status {response.status_code}"}

        except Exception as e:
            logging.error(f"Error calling Ollama: {e}")
            return {"error": str(e)}

    async def read_file_with_analysis(self, file_path: str, analysis_prompt: str = "Analyze this file") -> Dict[str, Any]:
        """Read and analyze a file"""
        try:
            with open(file_path, 'r') as f:
                content = f.read()

            # Detect language from file extension
            ext = Path(file_path).suffix
            language_map = {
                '.py': 'python',
                '.rs': 'rust',
                '.js': 'javascript',
                '.ts': 'typescript',
                '.go': 'go',
                '.java': 'java',
                '.cpp': 'cpp',
                '.c': 'c',
            }
            language = language_map.get(ext, 'text')

            prompt = f"{analysis_prompt}\n\nFile: {file_path}\nLanguage: {language}\n\nContent:\n{content}"

            # Synchronous call moved off the event loop, as above
            response = await asyncio.to_thread(
                requests.post,
                f"{self.ollama_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                analysis = result.get("response", "")
                return {"analysis": analysis, "file_path": file_path}
            else:
                return {"error": f"Analysis failed: {response.status_code}"}

        except Exception as e:
            return {"error": str(e)}

    async def explain_code(self, code: str, language: str = "python") -> Dict[str, Any]:
        """Explain code snippet"""
        prompt = f"Explain this {language} code in detail:\n\n{code}"

        try:
            # Synchronous call moved off the event loop, as above
            response = await asyncio.to_thread(
                requests.post,
                f"{self.ollama_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False,
                },
                timeout=300
            )

            if response.status_code == 200:
                result = response.json()
                explanation = result.get("response", "")
                return {"explanation": explanation}
            else:
                return {"error": f"Explanation failed: {response.status_code}"}

        except Exception as e:
            return {"error": str(e)}

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class AiShellMCPServer:
    """HTTP server exposing the local LLM tools to the ai.shell CLI."""

    def __init__(self, config_path: Optional[str] = None):
        self.config = self.load_config(config_path)
        # Wire the configured model and Ollama host into the LLM helper
        llm_cfg = self.config.get("llm", {})
        self.llm_server = LocalLLMServer(model=llm_cfg.get("default_model", "qwen2.5-coder:7b"))
        self.llm_server.ollama_url = llm_cfg.get("ollama_host", self.llm_server.ollama_url)
        self.app = web.Application()
        self.setup_routes()

    def load_config(self, config_path: Optional[str]) -> Dict[str, Any]:
        """Load configuration from a TOML file"""
        if config_path and Path(config_path).exists():
            import toml
            return toml.load(config_path)
        else:
            # Default configuration
            return {
                "server": {
                    "host": "127.0.0.1",
                    "port": 8765
                },
                "llm": {
                    "ollama_host": "http://localhost:11434",
                    "default_model": "qwen2.5-coder:7b"
                }
            }
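
    # Illustrative config.toml mirroring the defaults above (both tables are
    # read by __init__ and run()):
    #
    #   [server]
    #   host = "127.0.0.1"
    #   port = 8765
    #
    #   [llm]
    #   ollama_host = "http://localhost:11434"
    #   default_model = "qwen2.5-coder:7b"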

    def setup_routes(self):
        """Set up HTTP routes"""
        # Configure CORS
        cors = aiohttp_cors.setup(self.app, defaults={
            "*": aiohttp_cors.ResourceOptions(
                allow_credentials=True,
                expose_headers="*",
                allow_headers="*",
                allow_methods="*"
            )
        })

        # Add routes
        resource = self.app.router.add_resource("/execute")
        cors.add(resource.add_route("POST", self.handle_execute))

        resource = self.app.router.add_resource("/tools")
        cors.add(resource.add_route("GET", self.handle_tools))

        resource = self.app.router.add_resource("/health")
        cors.add(resource.add_route("GET", self.handle_health))

    async def handle_execute(self, request: web.Request) -> web.Response:
        """Execute a tool request"""
        data: Dict[str, Any] = {}
        try:
            data = await request.json()
            method = data.get("method")
            params = data.get("params", {})
            context = data.get("context", {})

            logger.info(f"Executing method: {method}")

            # Map method to tool
            if method == "code_with_local_llm":
                result = await self.llm_server.code_with_local_llm(
                    prompt=params.get("prompt"),
                    language=params.get("language", "python")
                )
            elif method == "read_file_with_analysis":
                result = await self.llm_server.read_file_with_analysis(
                    file_path=params.get("file_path"),
                    analysis_prompt=params.get("analysis_prompt", "Analyze this file")
                )
            elif method == "explain_code":
                result = await self.llm_server.explain_code(
                    code=params.get("code"),
                    language=params.get("language", "python")
                )
            else:
                return web.json_response({
                    "id": data.get("id"),
                    "error": f"Unknown method: {method}",
                    "error_code": 1001
                }, status=400)

            return web.json_response({
                "id": data.get("id"),
                "result": result,
                "error": None
            })

        except Exception as e:
            logger.error(f"Error executing request: {e}")
            # data is pre-initialized above, so this is safe even if request.json() failed
            return web.json_response({
                "id": data.get("id", ""),
                "error": str(e),
                "error_code": 2001
            }, status=500)
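
    # Illustrative /execute request body (field names match what the handler
    # above reads; "id" is caller-chosen and echoed back in the response):
    #
    #   {"id": "42",
    #    "method": "code_with_local_llm",
    #    "params": {"prompt": "a function that reverses a string", "language": "python"}}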

    async def handle_tools(self, request: web.Request) -> web.Response:
        """Return available tools"""
        tools = [
            {
                "name": "code_with_local_llm",
                "description": "Generate code using local LLM",
                "parameters": {
                    "prompt": "string",
                    "language": "string (optional)"
                }
            },
            {
                "name": "read_file_with_analysis",
                "description": "Read and analyze a file",
                "parameters": {
                    "file_path": "string",
                    "analysis_prompt": "string (optional)"
                }
            },
            {
                "name": "explain_code",
                "description": "Explain code snippet",
                "parameters": {
                    "code": "string",
                    "language": "string (optional)"
                }
            }
        ]

        return web.json_response({"tools": tools})

    async def handle_health(self, request: web.Request) -> web.Response:
        """Health check endpoint"""
        # Check LLM connection by pinging the Ollama API with a short timeout
        try:
            ping = await asyncio.to_thread(
                requests.get,
                f"{self.llm_server.ollama_url}/api/tags",
                timeout=5
            )
            llm_status = "connected" if ping.status_code == 200 else "disconnected"
        except Exception:
            llm_status = "disconnected"

        return web.json_response({
            "status": "ok",
            "llm_status": llm_status,
            "version": "0.1.0"
        })

    def run(self):
        """Start the server"""
        host = self.config["server"]["host"]
        port = self.config["server"]["port"]

        logger.info(f"Starting ai.shell MCP Server on {host}:{port}")
        web.run_app(self.app, host=host, port=port)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="ai.shell MCP Server")
    parser.add_argument("--config", help="Path to configuration file")
    args = parser.parse_args()

    server = AiShellMCPServer(config_path=args.config)
    server.run()
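
# Illustrative smoke test against a running instance (default host/port from
# the configuration above; any HTTP client works, curl shown here):
#
#   curl -X POST http://127.0.0.1:8765/execute \
#        -H 'Content-Type: application/json' \
#        -d '{"id": "1", "method": "explain_code", "params": {"code": "print(1)"}}'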