diff --git a/nio-llm/client.py b/nio-llm/client.py
index b6a4d6f..ea8ec4d 100644
--- a/nio-llm/client.py
+++ b/nio-llm/client.py
@@ -22,21 +22,25 @@ class LLMClient(AsyncClient):
homeserver: str,
device_id: str,
preprompt: str,
- room: str,
ggml_path: str,
+ room: str,
):
"""Create a new LLMClient instance."""
- super().__init__(
- user=f"@{username}:{homeserver.removeprefix('https://')}",
- homeserver=homeserver,
- device_id=device_id,
- )
-
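+ # build the full matrix user id up front (e.g. @bot:example.org)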
+ self.uid = f"@{username}:{homeserver.removeprefix('https://')}"
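+ # note: spawn time is kept in milliseconds to match event.server_timestamp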
self.spawn_time = time.time() * 1000
self.username = username
self.preprompt = preprompt
self.room = room
+ # create the AsyncClient instance
+ super().__init__(
+ user=self.uid,
+ homeserver=homeserver,
+ device_id=device_id,
+ )
+
# create the Llama instance
self.llm = Llama(
model_path=ggml_path,
@@ -48,36 +52,50 @@
async def message_callback(self, room: MatrixRoom, event: RoomMessageText):
"""Process new messages as they come in."""
- logger.debug(f"Received new message in room {room.room_id}.")
- logger.debug(f"Message body: {event.body}")
+ logger.debug(f"New RoomMessageText: {event.source}")
# ignore our own messages
if event.sender == self.user:
logger.debug("Ignoring our own message.")
return
- # ignore messages pre-spawn
+ # ignore messages pre-dating our spawn time
if event.server_timestamp < self.spawn_time:
logger.debug("Ignoring message pre-spawn.")
return
- # ignore messages sent in other rooms
+ # ignore messages not in our monitored room
if room.room_id != self.room:
logger.debug("Ignoring message in different room.")
return
- if self.username not in event.body:
+ # ignore edited messages
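+ # (edits are m.replace events whose content carries m.new_content)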
+ if "m.new_content" in event.source["content"]:
+ logger.debug("Ignoring edited message.")
+ return
+
+ # ignore messages not mentioning us
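+ # (clients render mentions as matrix.to links in the html formatted_body)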
+ if not (
+ "format" in event.source["content"]
+ and "formatted_body" in event.source["content"]
+ and event.source["content"]["format"] == "org.matrix.custom.html"
+ and f'<a href="https://matrix.to/#/{self.uid}">{self.username}</a>'
+ in event.source["content"]["formatted_body"]
+ ):
logger.debug("Ignoring message not directed at us.")
return
+ # generate prompt from message
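+ # (the trailing "<username>:" primes the model to write the bot's reply)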
prompt = dedent(
f"""
{self.preprompt}
<{event.sender}>: {event.body}
- :
+ <{self.username}>:
""",
).strip()
-
logger.debug(f"Prompt: {prompt}")
# enable typing indicator
@@ -87,6 +105,8 @@
timeout=100000000,
)
+ # generate response using llama.cpp
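+ # (llama-cpp-python returns an openai-style completion dict)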
output = self.llm(
prompt,
max_tokens=100,