Implement AI memory system with psychological priority scoring

Core changes:
- Add AI interpreter module for content interpretation and priority scoring
- Extend Memory struct with interpreted_content, priority_score (f32: 0.0-1.0), and user_context
- Implement automatic memory pruning based on priority scores
- Add capacity management (default: 100 memories max)
- Create comprehensive design documentation

Technical details:
- Changed priority_score from u8 (1-100) to f32 (0.0-1.0) for better AI compatibility
- Add create_memory_with_ai() method for AI-enhanced memory creation
- Implement get_memories_by_priority() for priority-based sorting
- Score evaluation criteria: emotional impact, user relevance, novelty, utility

Philosophy:
This implements a "psychological priority memory system" where AI interprets
and evaluates memories rather than storing raw content. Inspired by how human
memory works - interpreting and prioritizing rather than perfect recording.
This commit is contained in:
Claude
2025-11-05 14:09:39 +00:00
parent 62b91e5e5a
commit fd97ba2d81
5 changed files with 378 additions and 7 deletions

143
src/ai_interpreter.rs Normal file
View File

@@ -0,0 +1,143 @@
use anyhow::{Context, Result};
#[cfg(feature = "ai-analysis")]
use openai::{
chat::{ChatCompletion, ChatCompletionMessage, ChatCompletionMessageRole},
set_key,
};
/// Interprets memory content and assigns psychological priority scores.
///
/// With the `ai-analysis` feature enabled, methods call the OpenAI chat API;
/// without it, every method falls back to cheap pass-through defaults.
pub struct AIInterpreter {
    #[cfg(feature = "ai-analysis")]
    api_key: Option<String>, // value of OPENAI_API_KEY, if set in the environment
}
impl AIInterpreter {
    /// Construct an interpreter. With the `ai-analysis` feature it reads
    /// `OPENAI_API_KEY` from the environment and registers it with the
    /// OpenAI client; absence of the key is not an error (methods degrade
    /// to defaults instead).
    pub fn new() -> Self {
        #[cfg(feature = "ai-analysis")]
        {
            let api_key = std::env::var("OPENAI_API_KEY").ok();
            if let Some(ref key) = api_key {
                set_key(key.clone());
            }
            AIInterpreter { api_key }
        }
        #[cfg(not(feature = "ai-analysis"))]
        {
            AIInterpreter {}
        }
    }

    /// AI interpretation: reinterpret the original content, extracting deeper
    /// meaning and context.
    ///
    /// # Errors
    /// Fails only if the chat-completion request itself fails. Returns the
    /// raw `content` unchanged when no API key is configured or the model
    /// returns no message.
    #[cfg(feature = "ai-analysis")]
    pub async fn interpret_content(&self, content: &str) -> Result<String> {
        if self.api_key.is_none() {
            return Ok(content.to_string());
        }

        let messages = vec![
            ChatCompletionMessage {
                role: ChatCompletionMessageRole::System,
                content: Some("あなたは記憶を解釈するAIです。与えられたテキストを解釈し、より深い意味や文脈を抽出してください。元のテキストの本質を保ちながら、新しい視点や洞察を加えてください。".to_string()),
                name: None,
                function_call: None,
            },
            ChatCompletionMessage {
                role: ChatCompletionMessageRole::User,
                content: Some(format!("以下のテキストを解釈してください:\n\n{}", content)),
                name: None,
                function_call: None,
            },
        ];

        // `messages` is not used afterwards, so move it instead of cloning.
        let chat_completion = ChatCompletion::builder("gpt-3.5-turbo", messages)
            .create()
            .await
            .context("Failed to create chat completion")?;

        let response = chat_completion
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| content.to_string());

        Ok(response)
    }

    /// Feature-off fallback: pass the content through unchanged.
    #[cfg(not(feature = "ai-analysis"))]
    pub async fn interpret_content(&self, content: &str) -> Result<String> {
        Ok(content.to_string())
    }

    /// Psychological scoring: rate the importance of `content` on a
    /// 0.0-1.0 scale (emotional impact, user relevance, novelty, utility —
    /// 0.25 each, per the system prompt).
    ///
    /// Returns the neutral score 0.5 when no API key is configured or the
    /// model reply contains no parseable number. The result is always
    /// clamped into [0.0, 1.0].
    #[cfg(feature = "ai-analysis")]
    pub async fn calculate_priority_score(&self, content: &str, user_context: Option<&str>) -> Result<f32> {
        if self.api_key.is_none() {
            return Ok(0.5); // default neutral score
        }

        let context_info = user_context
            .map(|ctx| format!("\n\nユーザーコンテキスト: {}", ctx))
            .unwrap_or_default();

        let messages = vec![
            ChatCompletionMessage {
                role: ChatCompletionMessageRole::System,
                content: Some(format!(
                    "あなたは記憶の重要度を評価するAIです。以下の基準で0.0-1.0のスコアをつけてください:\n\
                    - 感情的インパクト (0.0-0.25)\n\
                    - ユーザーとの関連性 (0.0-0.25)\n\
                    - 新規性・独自性 (0.0-0.25)\n\
                    - 実用性 (0.0-0.25)\n\n\
                    スコアのみを小数で返してください。例: 0.75{}", context_info
                )),
                name: None,
                function_call: None,
            },
            ChatCompletionMessage {
                role: ChatCompletionMessageRole::User,
                content: Some(format!("以下のテキストの重要度を評価してください:\n\n{}", content)),
                name: None,
                function_call: None,
            },
        ];

        let chat_completion = ChatCompletion::builder("gpt-3.5-turbo", messages)
            .create()
            .await
            .context("Failed to create chat completion")?;

        let response = chat_completion
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| "0.5".to_string());

        // Extract the score: take the first whitespace-separated token that
        // parses as a decimal number, so replies with surrounding text
        // (e.g. "スコア: 0.75") still work. The previous code only handled a
        // bare number and silently fell back to 0.5 otherwise.
        let score = response
            .split_whitespace()
            .find_map(|token| {
                token
                    .trim_matches(|c: char| !c.is_ascii_digit() && c != '.')
                    .parse::<f32>()
                    .ok()
            })
            .unwrap_or(0.5)
            .clamp(0.0, 1.0);

        Ok(score)
    }

    /// Feature-off fallback: always the neutral score.
    #[cfg(not(feature = "ai-analysis"))]
    pub async fn calculate_priority_score(&self, _content: &str, _user_context: Option<&str>) -> Result<f32> {
        Ok(0.5) // default neutral score
    }

    /// Run AI interpretation and psychological scoring together.
    /// Returns `(interpreted_content, priority_score)`.
    pub async fn analyze(&self, content: &str, user_context: Option<&str>) -> Result<(String, f32)> {
        let interpreted = self.interpret_content(content).await?;
        let score = self.calculate_priority_score(content, user_context).await?;
        Ok((interpreted, score))
    }
}
impl Default for AIInterpreter {
    /// `Default` simply delegates to [`AIInterpreter::new`], so it shares
    /// the same environment-variable behavior.
    fn default() -> Self {
        Self::new()
    }
}

View File

@@ -1,2 +1,3 @@
pub mod memory;
pub mod mcp;
pub mod mcp;
pub mod ai_interpreter;

View File

@@ -4,11 +4,15 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use uuid::Uuid;
use crate::ai_interpreter::AIInterpreter;
/// A single stored memory: the raw content plus the AI-derived
/// interpretation and psychological priority score.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
    pub id: String,
    pub content: String,
    pub interpreted_content: String, // content after AI interpretation (raw content when AI is unavailable)
    pub priority_score: f32, // psychological priority score in [0.0, 1.0]
    pub user_context: Option<String>, // user-specific context supplied at creation time
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
@@ -67,6 +71,9 @@ pub struct MemoryManager {
memories: HashMap<String, Memory>,
conversations: HashMap<String, Conversation>,
data_file: PathBuf,
max_memories: usize, // capacity limit on stored memories (default: 100)
min_priority_score: f32, // minimum priority score in [0.0, 1.0] — NOTE(review): not consulted by the pruning logic visible here; confirm intended use
ai_interpreter: AIInterpreter, // AI interpretation / scoring engine
}
impl MemoryManager {
@@ -91,23 +98,68 @@ impl MemoryManager {
memories,
conversations,
data_file,
max_memories: 100, // デフォルト: 100件
min_priority_score: 0.3, // デフォルト: 0.3以上
ai_interpreter: AIInterpreter::new(),
})
}
/// Create a plain memory from `content` with a neutral priority score.
/// Enforces the capacity limit and persists to disk before returning the
/// new memory's id.
pub fn create_memory(&mut self, content: &str) -> Result<String> {
    let memory_id = Uuid::new_v4().to_string();
    let timestamp = Utc::now();

    let record = Memory {
        id: memory_id.clone(),
        content: content.to_string(),
        interpreted_content: content.to_string(), // AI interpretation wired in later
        priority_score: 0.5, // psychological scoring wired in later
        user_context: None,
        created_at: timestamp,
        updated_at: timestamp,
    };
    self.memories.insert(memory_id.clone(), record);

    // Capacity check, then persist.
    self.prune_memories_if_needed()?;
    self.save_data()?;

    Ok(memory_id)
}
/// Create a memory using AI interpretation and psychological scoring.
/// `user_context`, when given, is both fed to the scorer and stored on
/// the memory. Enforces the capacity limit and persists before returning
/// the new memory's id.
pub async fn create_memory_with_ai(
    &mut self,
    content: &str,
    user_context: Option<&str>,
) -> Result<String> {
    // Run AI interpretation and scoring first; creation fails if analysis fails.
    let (interpreted_content, priority_score) =
        self.ai_interpreter.analyze(content, user_context).await?;

    let memory_id = Uuid::new_v4().to_string();
    let timestamp = Utc::now();
    let record = Memory {
        id: memory_id.clone(),
        content: content.to_string(),
        interpreted_content,
        priority_score,
        user_context: user_context.map(str::to_string),
        created_at: timestamp,
        updated_at: timestamp,
    };
    self.memories.insert(memory_id.clone(), record);

    // Capacity check, then persist.
    self.prune_memories_if_needed()?;
    self.save_data()?;

    Ok(memory_id)
}
@@ -131,6 +183,34 @@ impl MemoryManager {
}
}
// Capacity limit: evict the lowest-priority memories first until the
// count fits within `max_memories`. Does not persist — callers save after.
fn prune_memories_if_needed(&mut self) -> Result<()> {
    if self.memories.len() <= self.max_memories {
        return Ok(());
    }

    // Rank by ascending priority. `f32` only implements `PartialOrd`, so
    // the original `a.1.cmp(&b.1)` did not compile; `total_cmp` provides
    // the required total order (NaN sorts after all real numbers).
    let mut ranked: Vec<(String, f32)> = self
        .memories
        .iter()
        .map(|(id, mem)| (id.clone(), mem.priority_score))
        .collect();
    ranked.sort_unstable_by(|a, b| a.1.total_cmp(&b.1));

    let excess = self.memories.len() - self.max_memories;
    for (id, _) in ranked.iter().take(excess) {
        self.memories.remove(id);
    }

    // NOTE(review): `min_priority_score` is never consulted here — confirm
    // whether memories below that threshold should also be evicted.
    Ok(())
}
// Fetch all memories ordered by priority score, highest first.
pub fn get_memories_by_priority(&self) -> Vec<&Memory> {
    let mut memories: Vec<&Memory> = self.memories.values().collect();
    // `f32` only implements `PartialOrd`, so the original `.cmp()` did not
    // compile; `total_cmp` (reversed via operand order) gives descending order.
    memories.sort_unstable_by(|a, b| b.priority_score.total_cmp(&a.priority_score));
    memories
}
pub fn search_memories(&self, query: &str) -> Vec<&Memory> {
let query_lower = query.to_lowercase();
let mut results: Vec<_> = self.memories