🐛 temporarily wrap message_callback's inner logic in a try/except

This commit is contained in:
Laureηt 2023-10-21 10:26:17 +00:00
parent f92a20b2c5
commit 95c751ea30
Signed by: Laurent
SSH key fingerprint: SHA256:kZEpW8cMJ54PDeCvOhzreNr4FSh6R13CMGH/POoO8DI

View file

@ -108,117 +108,120 @@ class LLMClient(AsyncClient):
event (`RoomMessageText`):
The message event.
"""
logger.debug(f"New RoomMessageText: {event.source}")
try:
logger.debug(f"New RoomMessageText: {event.source}")
# ignore messages pre-dating our spawn time
if event.server_timestamp < self.spawn_time:
logger.debug("Ignoring message pre-spawn.")
return
# ignore messages pre-dating our spawn time
if event.server_timestamp < self.spawn_time:
logger.debug("Ignoring message pre-spawn.")
return
# ignore messages not in our monitored room
if room.room_id != self.room:
logger.debug("Ignoring message in different room.")
return
# ignore messages not in our monitored room
if room.room_id != self.room:
logger.debug("Ignoring message in different room.")
return
# ignore edited messages
if "m.new_content" in event.source["content"]:
logger.debug("Ignoring edited message.")
return
# ignore edited messages
if "m.new_content" in event.source["content"]:
logger.debug("Ignoring edited message.")
return
# ignore thread messages
if (
"m.relates_to" in event.source["content"]
and "rel_type" in event.source["content"]["m.relates_to"]
and event.source["content"]["m.relates_to"]["rel_type"] == "m.thread"
):
logger.debug("Ignoring thread message.")
return
# ignore thread messages
if (
"m.relates_to" in event.source["content"]
and "rel_type" in event.source["content"]["m.relates_to"]
and event.source["content"]["m.relates_to"]["rel_type"] == "m.thread"
):
logger.debug("Ignoring thread message.")
return
# update history
self.history.append(event)
logger.debug(f"Updated history: {self.history}")
# update history
self.history.append(event)
logger.debug(f"Updated history: {self.history}")
# update read receipt
await self.room_read_markers(
room_id=self.room,
fully_read_event=event.event_id,
read_event=event.event_id,
)
logger.debug(f"Updated read receipt to event: {event.event_id}")
# update read receipt
await self.room_read_markers(
room_id=self.room,
fully_read_event=event.event_id,
read_event=event.event_id,
)
logger.debug(f"Updated read receipt to event: {event.event_id}")
# ignore our own messages
if event.sender == self.user:
logger.debug("Ignoring our own message.")
return
# ignore our own messages
if event.sender == self.user:
logger.debug("Ignoring our own message.")
return
# ignore messages not mentioning us
if not (
"format" in event.source["content"]
and "formatted_body" in event.source["content"]
and event.source["content"]["format"] == "org.matrix.custom.html"
and f'<a href="https://matrix.to/#/{self.uid}">{self.username}</a>'
in event.source["content"]["formatted_body"]
):
logger.debug("Ignoring message not mentioning us.")
return
# ignore messages not mentioning us
if not (
"format" in event.source["content"]
and "formatted_body" in event.source["content"]
and event.source["content"]["format"] == "org.matrix.custom.html"
and f'<a href="https://matrix.to/#/{self.uid}">{self.username}</a>'
in event.source["content"]["formatted_body"]
):
logger.debug("Ignoring message not mentioning us.")
return
# start typing indicator loop
typing_task = asyncio.create_task(self.typing_loop())
# start typing indicator loop
typing_task = asyncio.create_task(self.typing_loop())
# generate response using llama.cpp
response = await openai.ChatCompletion.acreate(
model="local-model",
messages=[
{
"content": self.preprompt,
"role": "system",
},
*[
# generate response using llama.cpp
response = await openai.ChatCompletion.acreate(
model="local-model",
messages=[
{
"content": f"<{message.sender}>: {message.body}",
"role": "assistant" if message.sender == self.uid else "user",
}
for message in self.history
"content": self.preprompt,
"role": "system",
},
*[
{
"content": f"<{message.sender}>: {message.body}",
"role": "assistant" if message.sender == self.uid else "user",
}
for message in self.history
],
],
],
temperature=self.openai_temperature,
max_tokens=self.openai_max_tokens,
)
logger.debug(f"Generated response: {response}")
temperature=self.openai_temperature,
max_tokens=self.openai_max_tokens,
)
logger.debug(f"Generated response: {response}")
# retrieve the response
output = response["choices"][0]["message"]["content"].strip() # type: ignore
# retrieve the response
output = response["choices"][0]["message"]["content"].strip() # type: ignore
# strip the bot's uid from the response
output = output.removeprefix(f"<{self.uid}>:").strip()
# strip the bot's uid from the response
output = output.removeprefix(f"<{self.uid}>:").strip()
# detect mentions and replace them with html mentions
formatted_output = re.sub(
r"@[a-zA-Z-_]+:[^.]+\.[a-zA-Z]+",
lambda match: f'<a href="https://matrix.to/#/{match.group(0)}"></a>',
output,
)
# detect mentions and replace them with html mentions
formatted_output = re.sub(
r"@[a-zA-Z-_]+:[^.]+\.[a-zA-Z]+",
lambda match: f'<a href="https://matrix.to/#/{match.group(0)}"></a>',
output,
)
# replace newlines with <br>
formatted_output = formatted_output.replace("\n", "<br>")
# replace newlines with <br>
formatted_output = formatted_output.replace("\n", "<br>")
logger.debug(f"Formatted response: {formatted_output}")
logger.debug(f"Formatted response: {formatted_output}")
# send the response
await self.room_send(
room_id=self.room,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": output,
"format": "org.matrix.custom.html",
"formatted_body": formatted_output,
},
)
logger.debug(f"Sent response: {output}")
# send the response
await self.room_send(
room_id=self.room,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": output,
"format": "org.matrix.custom.html",
"formatted_body": formatted_output,
},
)
logger.debug(f"Sent response: {output}")
# stop typing indicator loop
typing_task.cancel()
# stop typing indicator loop
typing_task.cancel()
except Exception as e:
logger.error(f"Exception in message_callback: {e}")
async def start(
self,