Commit 3d83f522 authored by Stephen D

refactor

parent afd9c755
@@ -54,6 +54,60 @@ impl LlamaHandler {
         Ok(resp)
     }
 
+    async fn list_models(&self, ctx: &Context, msg: &Message) {
+        let people = self
+            .models
+            .keys()
+            .map(|x| format!("- {x}"))
+            .collect::<Vec<_>>()
+            .join("\n");
+
+        if let Err(e) = msg
+            .reply(ctx, format!("Available models:\n{}", people))
+            .await
+        {
+            eprintln!("{:?}", e);
+        }
+    }
+
+    async fn reply(&self, ctx: &Context, msg: &Message, model: &str, txt: &str) {
+        if txt.is_empty() {
+            return;
+        }
+
+        let _typing = try_or_log(|| Typing::start(ctx.http.clone(), msg.channel_id.0));
+
+        let resp = self.call_llama(model, txt).await;
+
+        let resp = match resp.as_ref() {
+            Ok(x) => x,
+            Err(e) => {
+                eprintln!("{e:?}");
+
+                "Could not communicate with Llama. Check the server logs for more details."
+            }
+        };
+
+        let resp = if resp.is_empty() {
+            "[No response]"
+        } else {
+            resp
+        };
+
+        // discord messages are limited to 2000 codepoints
+        let chunks: Vec<String> = resp
+            .chars()
+            .chunks(2000)
+            .into_iter()
+            .map(|chunk| chunk.collect())
+            .collect();
+
+        for chunk in chunks {
+            if let Err(e) = msg.reply(ctx, chunk).await {
+                eprintln!("{e:?}");
+            }
+        }
+    }
 }
 
 #[async_trait]
@@ -68,58 +122,14 @@ impl LineHandler for LlamaHandler {
         let txt = &msg.content;
 
         if txt.starts_with("!people") {
-            let people = self
-                .models
-                .keys()
-                .map(|x| format!("- {x}"))
-                .collect::<Vec<_>>()
-                .join("\n");
-            if let Err(e) = msg
-                .reply(ctx, format!("Available models:\n{}", people))
-                .await
-            {
-                eprintln!("{:?}", e);
-            }
+            self.list_models(ctx, msg).await;
 
             return;
         }
 
         for (name, model) in &self.models {
             if let Some(txt) = txt.strip_prefix(&format!("!{name} ")) {
-                if txt.is_empty() {
-                    return;
-                }
-
-                let _typing = try_or_log(|| Typing::start(ctx.http.clone(), msg.channel_id.0));
-
-                let resp = self.call_llama(model, txt).await;
-
-                let resp = match resp.as_ref() {
-                    Ok(x) => x,
-                    Err(e) => {
-                        eprintln!("{e:?}");
-                        "Could not communicate with Llama. Check the server logs for more details."
-                    }
-                };
-
-                let resp = if resp.is_empty() {
-                    "[No response]"
-                } else {
-                    resp
-                };
-
-                let chunks: Vec<String> = resp
-                    .chars()
-                    .chunks(2000)
-                    .into_iter()
-                    .map(|chunk| chunk.collect())
-                    .collect();
-
-                for chunk in chunks {
-                    if let Err(e) = msg.reply(ctx, chunk).await {
-                        eprintln!("{e:?}");
-                    }
-                }
-
+                self.reply(ctx, msg, model, txt).await;
 
                 return;
             }
...
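
Note on the chunking in the new reply helper: Discord rejects messages over 2000 codepoints, so the response is split with itertools' chunks before sending. The sketch below exercises that pattern on its own; it assumes the itertools crate (already a dependency of this handler), and the split_for_discord function and main driver are illustrative names, not part of this commit.

use itertools::Itertools;

// Split a reply into pieces that each fit Discord's 2000-codepoint message limit,
// mirroring the chunking done in LlamaHandler::reply.
fn split_for_discord(resp: &str) -> Vec<String> {
    resp.chars()
        .chunks(2000)
        .into_iter()
        .map(|chunk| chunk.collect())
        .collect()
}

fn main() {
    // Hypothetical long response: 4500 characters -> 3 chunks (2000 + 2000 + 500).
    let long_reply = "x".repeat(4500);
    let parts = split_for_discord(&long_reply);
    assert_eq!(parts.len(), 3);
    println!("{} message chunks", parts.len());
}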