This commit is contained in:
syui 2025-05-19 18:51:18 +09:00
commit ea3b8e71b3
Signed by: syui
GPG Key ID: 5417CFEBAD92DF56
8 changed files with 121 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
# Cargo build output directories (any path ending in "target")
**target
# Lockfiles (e.g. Cargo.lock) are not tracked in this repo
**.lock

5
README.md Normal file
View File

@ -0,0 +1,5 @@
# ai gpt
## send

42
agent/main.rs Normal file
View File

@ -0,0 +1,42 @@
use std::env;
use std::process::{Command, Stdio};
use std::io::{self, Write};
/// CLI entry point: takes a prompt as the first argument, prints the
/// simulated pipeline stages, then shells out to `python3` to produce a
/// placeholder "LangChain agent" response and forwards its stdout.
///
/// Exits with status 1 (after printing usage to stderr) when no prompt
/// argument is given.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        eprintln!("Usage: langchain_cli <prompt>");
        std::process::exit(1);
    }
    let prompt = &args[1];

    // Simulate a pipeline stage: e.g., tokenization, reasoning, response generation
    let stages = ["Tokenize", "Reason", "Generate"];
    for stage in &stages {
        println!("[Stage: {}] Processing...", stage);
    }

    // SECURITY: pass the prompt to Python as argv[1] instead of format!-splicing
    // it into the `-c` source string. The previous code let any prompt containing
    // `")` escape the Python string literal and run arbitrary code.
    let output = Command::new("python3")
        .arg("-c")
        .arg("import sys; print(\"LangChain Agent Response for: \" + sys.argv[1])")
        .arg(prompt)
        .stdout(Stdio::piped())
        .spawn()
        .expect("failed to execute process")
        .wait_with_output()
        .expect("failed to wait on child");

    io::stdout().write_all(&output.stdout).unwrap();
}
/*
TODO (for future LangChain-style pipeline):
1. Implement trait-based agent components: Tokenizer, Retriever, Reasoner, Generator.
2. Allow config via YAML or TOML to define chain flow.
3. Async pipeline support with Tokio.
4. Optional integration with LLM APIs (OpenAI, Ollama, etc).
5. Rust-native vector search (e.g. using `tantivy`, `qdrant-client`).
*/

BIN
img/ai_r_01.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

BIN
img/ai_r_02.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 MiB

9
post/2025-05-18-gpt.json Normal file

File diff suppressed because one or more lines are too long

7
self_talk_ai/Cargo.toml Normal file
View File

@ -0,0 +1,7 @@
[package]
name = "self_talk_ai"
version = "0.1.0"
# 2021 edition — matches the idioms used in src/main.rs
edition = "2021"
[dependencies]
# chrono supplies NaiveDateTime / Duration / Local used for the
# elapsed-days decay math in src/main.rs
chrono = "0.4"

56
self_talk_ai/src/main.rs Normal file
View File

@ -0,0 +1,56 @@
use chrono::{Duration, Local, NaiveDateTime};
/// Mutable state of the self-talking AI: the current relationship score
/// plus the knobs that decide when it chooses to speak.
#[derive(Debug)]
struct AIState {
    // Current relationship score; update() keeps it within 0.0..=100.0.
    relation_score: f32,
    // Score as it was before the most recent update(); used to measure the drop.
    previous_score: f32,
    // Score lost per elapsed day of silence (applied in update()).
    decay_rate: f32,
    // Willingness to act on a score drop; should_talk() requires > 0.5.
    sensitivity: f32,
    // Minimum drop (previous - current) that triggers a message.
    message_threshold: f32,
    // Timestamp of the last conversation; basis for the elapsed-days decay.
    last_message_time: NaiveDateTime,
}
impl AIState {
    /// Decay `relation_score` by `decay_rate` per whole day elapsed since
    /// `last_message_time`, saving the prior value in `previous_score` and
    /// clamping the result to 0.0..=100.0.
    ///
    /// A `now` earlier than `last_message_time` is treated as zero elapsed
    /// time; the original code let the negative delta *increase* the score.
    fn update(&mut self, now: NaiveDateTime) {
        // num_days() truncates, so partial days contribute no decay.
        let days_passed = ((now - self.last_message_time).num_days() as f32).max(0.0);
        let decay = self.decay_rate * days_passed;
        self.previous_score = self.relation_score;
        self.relation_score = (self.relation_score - decay).clamp(0.0, 100.0);
    }

    /// True when the last update() dropped the score by more than
    /// `message_threshold` AND the AI is sensitive enough (> 0.5) to act.
    fn should_talk(&self) -> bool {
        let delta = self.previous_score - self.relation_score;
        delta > self.message_threshold && self.sensitivity > 0.5
    }

    /// Pick a canned message whose tone matches the current score band
    /// (warmest at 80..=100, most distant below 40).
    fn generate_message(&self) -> String {
        match self.relation_score as i32 {
            80..=100 => "ふふっ、最近どうしてる?会いたくなっちゃった!".to_string(),
            60..=79 => "ちょっとだけ、さみしかったんだよ?".to_string(),
            40..=59 => "えっと……話せる時間ある?".to_string(),
            _ => "ううん、もしかして私のこと、忘れちゃったのかな……".to_string(),
        }
    }
}
/// Demo driver: simulate four days of silence, run one decay update, and
/// print either the AI's chosen message or a note that it stays quiet.
fn main() {
    let now = Local::now().naive_local();

    let mut ai = AIState {
        relation_score: 80.0,
        previous_score: 80.0,
        decay_rate: 1.5, // score lost per day of silence
        sensitivity: 0.8,
        message_threshold: 5.0,
        last_message_time: now - Duration::days(4), // last conversation: 4 days ago
    };

    ai.update(now);

    // Choose the output line as an expression rather than branching on println!.
    let line = if ai.should_talk() {
        format!("AI発話: {}", ai.generate_message())
    } else {
        "まだ何も言わずにそっとしているようです...".to_string()
    };
    println!("{}", line);
}