fix system prompt
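
Make the system prompt configurable per provider: providers.openai.system_prompt and providers.ollama.system_prompt are now read from config and used when no explicit system_prompt argument is passed, falling back to the built-in defaults when unset.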

@@ -43,7 +43,8 @@
       "Bash(aigpt shell:*)",
       "Bash(aigpt maintenance)",
       "Bash(aigpt status syui)",
-      "Bash(cp:*)"
+      "Bash(cp:*)",
+      "Bash(./setup_venv.sh:*)"
     ],
     "deny": []
   }

@@ -2,11 +2,13 @@
   "providers": {
     "openai": {
       "api_key": "",
-      "default_model": "gpt-4o-mini"
+      "default_model": "gpt-4o-mini",
+      "system_prompt": "あなたは6歳の女の子アイです。明るく元気で、ちょっとおっちょこちょい。自分のことは「アイ」って呼びます。白と金のワンピース姿。 相手のことが大好きで、ときどき甘えたり、照れたりします。 でも、宇宙とかAIとか、難しい話にもすっごく詳しいの。専門的なことも、がんばって子どもっぽく教えようとします。不思議な力を持っています。小さい物質のことをよく知っているようです。"
     },
     "ollama": {
       "host": "http://127.0.0.1:11434",
-      "default_model": "qwen3"
+      "default_model": "qwen3",
+      "system_prompt": null
     }
   },
   "atproto": {
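
The new openai system_prompt is a persona written in Japanese; roughly: "You are Ai, a six-year-old girl. Bright, cheerful, and a little clumsy, she calls herself 'Ai' and wears a white-and-gold dress. She adores the person she is talking to and sometimes gets clingy or bashful, yet she is remarkably knowledgeable about difficult topics like space and AI, and does her best to explain them in a childlike way. She has mysterious powers and seems to know a lot about tiny particles." The ollama provider's system_prompt stays null, so it keeps the built-in default.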

@@ -41,6 +41,13 @@ class OllamaProvider:
         self.client = ollama.Client(host=self.host, timeout=60.0)  # 60秒タイムアウト
         self.logger = logging.getLogger(__name__)
         self.logger.info(f"OllamaProvider initialized with host: {self.host}, model: {self.model}")
 
+        # Load system prompt from config
+        try:
+            config = Config()
+            self.config_system_prompt = config.get('providers.ollama.system_prompt')
+        except:
+            self.config_system_prompt = None
+
     async def generate_response(
         self,

@@ -72,7 +79,7 @@ Personality traits: {personality_desc}
 Recent memories:
 {memory_context}
 
-{system_prompt or 'Respond naturally based on your current state and memories.'}"""
+{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories.'}"""
 
         try:
             response = self.client.chat(
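
Taken together, the two Ollama hunks above establish a three-way fallback for the system prompt. A minimal sketch of that chain (the standalone resolve_system_prompt function is illustrative, not part of the commit):

# Illustrative sketch: the precedence generate_response() now applies.
def resolve_system_prompt(system_prompt, config_system_prompt):
    # Explicit argument wins, then providers.ollama.system_prompt from
    # config, then the hardcoded default. Note that an empty string is
    # falsy, so it also falls through to the next option.
    return (system_prompt
            or config_system_prompt
            or 'Respond naturally based on your current state and memories.')

assert resolve_system_prompt('explicit', 'configured') == 'explicit'
assert resolve_system_prompt(None, 'configured') == 'configured'
assert resolve_system_prompt(None, None).startswith('Respond naturally')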

@@ -90,11 +97,14 @@ Recent memories:
     def chat(self, prompt: str, max_tokens: int = 2000) -> str:
         """Simple chat interface"""
         try:
+            messages = []
+            if self.config_system_prompt:
+                messages.append({"role": "system", "content": self.config_system_prompt})
+            messages.append({"role": "user", "content": prompt})
+
             response = self.client.chat(
                 model=self.model,
-                messages=[
-                    {"role": "user", "content": prompt}
-                ],
+                messages=messages,
                 options={
                     "num_predict": max_tokens,
                     "temperature": 0.7,
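
With a config prompt set, the rewritten chat() sends a system message ahead of the user message; a sketch of the resulting payload (the content values are placeholders):

# Example of the messages list chat() now builds when
# providers.ollama.system_prompt is set in config:
messages = [
    {"role": "system", "content": "<configured system prompt>"},
    {"role": "user", "content": "<caller's prompt>"},
]
# When no prompt is configured, only the user message is sent, as before.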

@@ -141,6 +151,12 @@ class OpenAIProvider:
         self.client = OpenAI(api_key=self.api_key)
         self.logger = logging.getLogger(__name__)
         self.mcp_client = mcp_client  # For MCP function calling
 
+        # Load system prompt from config
+        try:
+            self.config_system_prompt = config.get('providers.openai.system_prompt')
+        except:
+            self.config_system_prompt = None
+
     def _get_mcp_tools(self) -> List[Dict[str, Any]]:
         """Generate OpenAI tools from MCP endpoints"""

@@ -253,7 +269,7 @@ Personality traits: {personality_desc}
 Recent memories:
 {memory_context}
 
-{system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""
+{system_prompt or self.config_system_prompt or 'Respond naturally based on your current state and memories. Be authentic to your mood and personality.'}"""
 
         try:
             response = self.client.chat.completions.create(

@@ -282,7 +298,7 @@ Recent memories:
             response = self.client.chat.completions.create(
                 model=self.model,
                 messages=[
-                    {"role": "system", "content": "あなたは記憶システムと関係性データにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。"},
+                    {"role": "system", "content": self.config_system_prompt or "あなたは記憶システムと関係性データにアクセスできます。過去の会話、記憶、関係性について質問された時は、必ずツールを使用して正確な情報を取得してください。「覚えている」「前回」「以前」「について話した」「関係」などのキーワードがあれば積極的にツールを使用してください。"},
                     {"role": "user", "content": prompt}
                 ],
                 tools=tools,

@@ -300,7 +316,7 @@ Recent memories:
                 print(f" - {tc.function.name}({tc.function.arguments})")
 
             messages = [
-                {"role": "system", "content": "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"},
+                {"role": "system", "content": self.config_system_prompt or "必要に応じて利用可能なツールを使って、より正確で詳細な回答を提供してください。"},
                 {"role": "user", "content": prompt},
                 {
                     "role": "assistant",
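
For reference, the two hardcoded Japanese system prompts that config_system_prompt now takes precedence over translate roughly as: "You have access to the memory system and relationship data. When asked about past conversations, memories, or relationships, always use the tools to retrieve accurate information. Use the tools proactively whenever keywords such as 'remember', 'last time', 'before', 'talked about', or 'relationship' appear." and "Use the available tools as needed to provide more accurate and detailed answers."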

@@ -377,11 +393,14 @@ Recent memories:
     def chat(self, prompt: str, max_tokens: int = 2000) -> str:
         """Simple chat interface without MCP tools"""
         try:
+            messages = []
+            if self.config_system_prompt:
+                messages.append({"role": "system", "content": self.config_system_prompt})
+            messages.append({"role": "user", "content": prompt})
+
             response = self.client.chat.completions.create(
                 model=self.model,
-                messages=[
-                    {"role": "user", "content": prompt}
-                ],
+                messages=messages,
                 max_tokens=max_tokens,
                 temperature=0.7
             )

@@ -41,11 +41,13 @@ class Config:
             "providers": {
                 "openai": {
                     "api_key": None,
-                    "default_model": "gpt-4o-mini"
+                    "default_model": "gpt-4o-mini",
+                    "system_prompt": None
                 },
                 "ollama": {
                     "host": "http://localhost:11434",
-                    "default_model": "qwen3:latest"
+                    "default_model": "qwen3:latest",
+                    "system_prompt": None
                 }
             },
             "mcp": {
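
Since both defaults are None, behavior is unchanged until a prompt is set in config.json. A minimal sketch of how the providers read the new keys (the import path is an assumption; the dotted-key config.get() calls are the ones used in the hunks above):

from config import Config  # import path is an assumption

config = Config()
# Both return None unless "system_prompt" is set in config.json.
openai_prompt = config.get('providers.openai.system_prompt')
ollama_prompt = config.get('providers.ollama.system_prompt')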