From 3d83f522cb67ddcba9fe5ec3a5ee586b9e584cf9 Mon Sep 17 00:00:00 2001
From: Stephen D <webmaster@scd31.com>
Date: Fri, 18 Aug 2023 16:54:54 -0300
Subject: [PATCH] refactor: extract list_models and reply helpers in LlamaHandler

---
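Note: list_models() and reply() below are lifted as-is out of LlamaHandler's
message handler; the only subtle piece is the chunking in reply(), which splits
a response into pieces of at most 2000 chars so each Discord reply stays under
the 2000-codepoint message limit mentioned in the code comment. A minimal
standalone sketch of that chunking, using itertools the same way the handler
does (chunks() on an iterator comes from itertools::Itertools); the
chunk_message name and the main() harness are illustrative only and not part
of this patch:

    use itertools::Itertools;

    // Split a reply into pieces of at most 2000 Unicode scalar values,
    // mirroring the chunking done in LlamaHandler::reply.
    fn chunk_message(resp: &str) -> Vec<String> {
        resp.chars()
            .chunks(2000)
            .into_iter()
            .map(|chunk| chunk.collect())
            .collect()
    }

    fn main() {
        let long = "x".repeat(4500);
        let chunks = chunk_message(&long);
        assert_eq!(chunks.len(), 3);
        assert_eq!(chunks[0].chars().count(), 2000);
        assert_eq!(chunks[2].chars().count(), 500);
    }
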
 src/handlers/llama.rs | 102 +++++++++++++++++++++++-------------------
 1 file changed, 56 insertions(+), 46 deletions(-)

diff --git a/src/handlers/llama.rs b/src/handlers/llama.rs
index 3686aac..8a83b81 100644
--- a/src/handlers/llama.rs
+++ b/src/handlers/llama.rs
@@ -54,6 +54,60 @@ impl LlamaHandler {
 
 		Ok(resp)
 	}
+
+	async fn list_models(&self, ctx: &Context, msg: &Message) {
+		let people = self
+			.models
+			.keys()
+			.map(|x| format!("- {x}"))
+			.collect::<Vec<_>>()
+			.join("\n");
+
+		if let Err(e) = msg
+			.reply(ctx, format!("Available models:\n{}", people))
+			.await
+		{
+			eprintln!("{e:?}");
+		}
+	}
+
+	async fn reply(&self, ctx: &Context, msg: &Message, model: &str, txt: &str) {
+		if txt.is_empty() {
+			return;
+		}
+
+		let _typing = try_or_log(|| Typing::start(ctx.http.clone(), msg.channel_id.0));
+
+		let resp = self.call_llama(model, txt).await;
+		let resp = match resp.as_ref() {
+			Ok(x) => x,
+			Err(e) => {
+				eprintln!("{e:?}");
+
+				"Could not communicate with Llama. Check the server logs for more details."
+			}
+		};
+
+		let resp = if resp.is_empty() {
+			"[No response]"
+		} else {
+			resp
+		};
+
+		// Discord messages are limited to 2000 codepoints
+		let chunks: Vec<String> = resp
+			.chars()
+			.chunks(2000)
+			.into_iter()
+			.map(|chunk| chunk.collect())
+			.collect();
+
+		for chunk in chunks {
+			if let Err(e) = msg.reply(ctx, chunk).await {
+				eprintln!("{e:?}");
+			}
+		}
+	}
 }
 
 #[async_trait]
@@ -68,58 +122,14 @@ impl LineHandler for LlamaHandler {
 		let txt = &msg.content;
 
 		if txt.starts_with("!people") {
-			let people = self
-				.models
-				.keys()
-				.map(|x| format!("- {x}"))
-				.collect::<Vec<_>>()
-				.join("\n");
-
-			if let Err(e) = msg
-				.reply(ctx, format!("Available models:\n{}", people))
-				.await
-			{
-				eprintln!("{:?}", e);
-			}
+			self.list_models(ctx, msg).await;
 
 			return;
 		}
 
 		for (name, model) in &self.models {
 			if let Some(txt) = txt.strip_prefix(&format!("!{name} ")) {
-				if txt.is_empty() {
-					return;
-				}
-
-				let _typing = try_or_log(|| Typing::start(ctx.http.clone(), msg.channel_id.0));
-
-				let resp = self.call_llama(model, txt).await;
-				let resp = match resp.as_ref() {
-					Ok(x) => x,
-					Err(e) => {
-						eprintln!("{e:?}");
-
-						"Could not communicate with Llama. Check the server logs for more details."
-					}
-				};
-
-				let resp = if resp.is_empty() {
-					"[No response]"
-				} else {
-					resp
-				};
-
-				let chunks: Vec<String> = resp
-					.chars()
-					.chunks(2000)
-					.into_iter()
-					.map(|chunk| chunk.collect())
-					.collect();
-				for chunk in chunks {
-					if let Err(e) = msg.reply(ctx, chunk).await {
-						eprintln!("{e:?}");
-					}
-				}
+				self.reply(ctx, msg, model, txt).await;
 
 				return;
 			}
-- 
GitLab