
feat: Implement aishell - AI-powered shell automation tool

Implemented a generic alternative to Claude Code with the following features:

Core Implementation:
- Multi-LLM provider support (OpenAI-compatible APIs)
- Function calling for direct tool execution by the LLM
- Interactive REPL shell interface
- MCP server mode for Claude Desktop integration
- Shell executor with bash, read, write, and list tools (a sketch follows below)
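As a rough illustration of how the shell tools might be surfaced to the model through function calling, here is a minimal sketch using the ToolDefinition and FunctionDefinition types added in src/llm/provider.rs below. The helper name, parameter schema, and import path are assumptions; the actual tool definitions live in src/shell, which is not part of the excerpt shown here.

use serde_json::json;
use crate::llm::provider::{FunctionDefinition, ToolDefinition}; // assumed crate-internal path

// Hypothetical helper: describes the `bash` tool so the LLM can request shell commands.
// The real schema lives in src/shell, which is not included in this excerpt.
fn bash_tool_definition() -> ToolDefinition {
    ToolDefinition {
        tool_type: "function".to_string(),
        function: FunctionDefinition {
            name: "bash".to_string(),
            description: "Execute a shell command and return its combined output".to_string(),
            parameters: json!({
                "type": "object",
                "properties": {
                    "command": { "type": "string", "description": "Command line to run" }
                },
                "required": ["command"]
            }),
        },
    }
}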

Architecture:
- src/cli: Interactive REPL implementation
- src/llm: LLM provider abstraction (OpenAI-compatible)
- src/shell: Shell execution engine with duct
- src/mcp: MCP server for Claude Desktop
- src/config: Configuration management

Technical Stack:
- Rust 2021 with tokio async runtime
- clap for CLI framework
- reqwest for HTTP client
- duct for shell execution (a rough sketch follows this list)
- rustyline for REPL interface
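And a rough sketch of what the duct-based execution behind the bash tool could look like; run_bash is a hypothetical stand-in, since the actual src/shell executor is not included in the excerpt here.

use anyhow::{Context, Result};
use duct::cmd;

// Hypothetical stand-in for the shell executor's `bash` tool (the real one lives in src/shell).
fn run_bash(command: &str) -> Result<String> {
    cmd!("bash", "-c", command)
        .stderr_to_stdout() // merge stderr so the model sees errors too
        .unchecked()        // a non-zero exit code is still useful output, not an Err
        .read()             // capture the output as a String
        .with_context(|| format!("failed to run: {}", command))
}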

This tool integrates with aigpt to form AIOS (AI Operating System),
enabling AI-driven OS management and automation.

Based on aigpt architecture for CLI and MCP patterns.
Claude
2025-11-09 07:12:08 +00:00
parent b2433493b2
commit a50fef9182
16 changed files with 1219 additions and 5 deletions

src/llm/mod.rs (new file, 18 lines added)

@@ -0,0 +1,18 @@
pub mod provider;
pub mod openai;

pub use provider::{LLMProvider, Message, Role, ToolCall, ToolDefinition, ChatResponse};
pub use openai::OpenAIProvider;

use anyhow::Result;

/// Create an LLM provider based on the provider name
pub async fn create_provider(provider: &str, model: Option<&str>) -> Result<Box<dyn LLMProvider>> {
    match provider.to_lowercase().as_str() {
        "openai" => {
            let provider = OpenAIProvider::new(model)?;
            Ok(Box::new(provider))
        }
        _ => anyhow::bail!("Unsupported provider: {}", provider),
    }
}
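
For context, a possible call site for create_provider; the real call sites are in src/cli and src/mcp, which are not part of this excerpt, so the surrounding function and import path are assumptions.

use crate::llm::{create_provider, LLMProvider};

// Hypothetical call site; requires OPENAI_API_KEY in the environment.
async fn pick_provider() -> anyhow::Result<()> {
    // The explicit model overrides the OPENAI_MODEL env var and the "gpt-4" default.
    let provider = create_provider("openai", Some("gpt-4")).await?;
    println!("chatting with {}", provider.model_name());

    // Any other provider name currently bails with "Unsupported provider".
    assert!(create_provider("anthropic", None).await.is_err());
    Ok(())
}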

src/llm/openai.rs (new file, 126 lines added)

@@ -0,0 +1,126 @@
use anyhow::{Context, Result};
use async_trait::async_trait;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::env;

use super::provider::{ChatResponse, LLMProvider, Message, ToolCall, ToolDefinition};

#[derive(Debug, Serialize)]
struct ChatRequest {
    model: String,
    messages: Vec<Message>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tools: Option<Vec<ToolDefinition>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<String>,
}

#[derive(Debug, Deserialize)]
struct ChatCompletionResponse {
    choices: Vec<Choice>,
}

#[derive(Debug, Deserialize)]
struct Choice {
    message: ResponseMessage,
    finish_reason: String,
}

#[derive(Debug, Deserialize)]
struct ResponseMessage {
    #[serde(default)]
    content: Option<String>,
    #[serde(default)]
    tool_calls: Option<Vec<ToolCall>>,
}

pub struct OpenAIProvider {
    client: Client,
    api_key: String,
    base_url: String,
    model: String,
}

impl OpenAIProvider {
    pub fn new(model: Option<&str>) -> Result<Self> {
        let api_key = env::var("OPENAI_API_KEY")
            .context("OPENAI_API_KEY environment variable not set")?;
        let base_url = env::var("OPENAI_BASE_URL")
            .unwrap_or_else(|_| "https://api.openai.com/v1".to_string());
        let model = model
            .map(|s| s.to_string())
            .or_else(|| env::var("OPENAI_MODEL").ok())
            .unwrap_or_else(|| "gpt-4".to_string());

        Ok(Self {
            client: Client::new(),
            api_key,
            base_url,
            model,
        })
    }
}

#[async_trait]
impl LLMProvider for OpenAIProvider {
    async fn chat(
        &self,
        messages: Vec<Message>,
        tools: Option<Vec<ToolDefinition>>,
    ) -> Result<ChatResponse> {
        let url = format!("{}/chat/completions", self.base_url);

        let tool_choice = if tools.is_some() {
            Some("auto".to_string())
        } else {
            None
        };

        let request = ChatRequest {
            model: self.model.clone(),
            messages,
            tools,
            tool_choice,
        };

        let response = self
            .client
            .post(&url)
            .header("Authorization", format!("Bearer {}", self.api_key))
            .header("Content-Type", "application/json")
            .json(&request)
            .send()
            .await
            .context("Failed to send request to OpenAI API")?;

        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await.unwrap_or_default();
            anyhow::bail!("OpenAI API error ({}): {}", status, error_text);
        }

        let completion: ChatCompletionResponse = response
            .json()
            .await
            .context("Failed to parse OpenAI API response")?;

        let choice = completion
            .choices
            .into_iter()
            .next()
            .context("No choices in response")?;

        Ok(ChatResponse {
            content: choice.message.content.unwrap_or_default(),
            tool_calls: choice.message.tool_calls,
            finish_reason: choice.finish_reason,
        })
    }

    fn model_name(&self) -> &str {
        &self.model
    }
}
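
Since the commit message advertises OpenAI-compatible APIs and OPENAI_BASE_URL is read at construction time, here is a hedged sketch of pointing this provider at a local compatible endpoint. The URL, key, and model name are placeholders, not values from this commit.

use std::env;
use crate::llm::{LLMProvider, Message, OpenAIProvider};

// Hypothetical setup for a local OpenAI-compatible server; all three values are placeholders.
async fn demo_local_endpoint() -> anyhow::Result<()> {
    env::set_var("OPENAI_BASE_URL", "http://localhost:8080/v1");
    env::set_var("OPENAI_API_KEY", "dummy-key"); // OpenAIProvider::new still requires a key to be set
    env::set_var("OPENAI_MODEL", "local-model");

    let provider = OpenAIProvider::new(None)?;
    let reply = provider
        .chat(vec![Message::user("List the files in /tmp")], None)
        .await?;
    println!("{}", reply.content);
    Ok(())
}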

src/llm/provider.rs (new file, 104 lines added)

@@ -0,0 +1,104 @@
use anyhow::Result;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    System,
    User,
    Assistant,
    Tool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    pub role: Role,
    pub content: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
}

impl Message {
    pub fn system(content: impl Into<String>) -> Self {
        Self {
            role: Role::System,
            content: content.into(),
            tool_calls: None,
            tool_call_id: None,
        }
    }

    pub fn user(content: impl Into<String>) -> Self {
        Self {
            role: Role::User,
            content: content.into(),
            tool_calls: None,
            tool_call_id: None,
        }
    }

    pub fn assistant(content: impl Into<String>) -> Self {
        Self {
            role: Role::Assistant,
            content: content.into(),
            tool_calls: None,
            tool_call_id: None,
        }
    }

    pub fn tool(content: impl Into<String>, tool_call_id: String) -> Self {
        Self {
            role: Role::Tool,
            content: content.into(),
            tool_calls: None,
            tool_call_id: Some(tool_call_id),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCall {
    pub id: String,
    #[serde(rename = "type")]
    pub call_type: String,
    pub function: FunctionCall,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCall {
    pub name: String,
    pub arguments: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolDefinition {
    #[serde(rename = "type")]
    pub tool_type: String,
    pub function: FunctionDefinition,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionDefinition {
    pub name: String,
    pub description: String,
    pub parameters: serde_json::Value,
}

#[derive(Debug)]
pub struct ChatResponse {
    pub content: String,
    pub tool_calls: Option<Vec<ToolCall>>,
    pub finish_reason: String,
}

#[async_trait]
pub trait LLMProvider: Send + Sync {
    /// Send a chat completion request
    async fn chat(&self, messages: Vec<Message>, tools: Option<Vec<ToolDefinition>>) -> Result<ChatResponse>;

    /// Get the model name
    fn model_name(&self) -> &str;
}
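
To show how these pieces are meant to fit together, here is a hedged sketch of a single tool-calling round trip against the trait above. The driver function is a hypothetical stand-in for the REPL loop, and the dispatch to the shell executor is stubbed out because that code is not in this excerpt.

use crate::llm::{LLMProvider, Message, ToolDefinition};

// Hypothetical driver loop; tool execution is stubbed because src/shell is not shown in this excerpt.
async fn one_round(provider: &dyn LLMProvider, tools: Vec<ToolDefinition>) -> anyhow::Result<()> {
    let mut messages = vec![
        Message::system("You are a shell automation assistant."),
        Message::user("Show the current directory."),
    ];

    let response = provider.chat(messages.clone(), Some(tools.clone())).await?;

    if let Some(calls) = response.tool_calls {
        // Record the assistant turn that requested the tools, then answer each call.
        let mut assistant = Message::assistant(response.content);
        assistant.tool_calls = Some(calls.clone());
        messages.push(assistant);

        for call in calls {
            // In the real loop, call.function.arguments (a JSON string) would be parsed and
            // dispatched to the shell executor; this placeholder stands in for that output.
            let output = format!("(stub) output of tool '{}'", call.function.name);
            messages.push(Message::tool(output, call.id));
        }

        // A second request lets the model turn the tool output into a final answer.
        let final_reply = provider.chat(messages, Some(tools)).await?;
        println!("{}", final_reply.content);
    } else {
        println!("{}", response.content);
    }

    Ok(())
}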