From 1f368bf3d244e54de34c395ea44fe3d3e7e3bc33 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Laure=CE=B7t?=
Date: Mon, 12 Jun 2023 21:17:52 +0200
Subject: [PATCH] =?UTF-8?q?=F0=9F=93=9D=20add=20Installation=20and=20Usage?=
 =?UTF-8?q?=20instructions?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/README.md b/README.md
index 62ec86b..a1fb325 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,88 @@
 
 You own little LLM in your matrix chatroom.
 
+## Installation
+
+```bash
+pip install git+https://github.com/Laurent2916/nio-llm.git
+```
+
+## Usage
+
+This project uses [jsonargparse](https://github.com/omni-us/jsonargparse/) to help with the command line arguments.
+
+To see the available options, run:
+
+```bash
+nio_llm --help
+```
+
+To run the bot, you can use command line arguments, environment variables, a config file, or any mix of the three.
+
+### Command line arguments
+
+The first four options (`--room`, `--password`, `--username`, `--preprompt`) are required; the rest are optional and shown with their default values.
+
+```bash
+nio_llm \
+  --room <room> \
+  --password <password> \
+  --username <username> \
+  --preprompt <preprompt> \
+  --device-id nio-llm \
+  --homeserver https://matrix.org \
+  --ggml-repoid TheBloke/stable-vicuna-13B-GGML \
+  --ggml-filename stable-vicuna-13B.ggmlv3.q5_1.bin \
+  --sync-timeout 30000
+```
+
+### Environment variables
+
+```bash
+# required
+export NIO_LLM_ROOM=
+export NIO_LLM_PASSWORD=
+export NIO_LLM_USERNAME=
+export NIO_LLM_PREPROMPT=
+
+# optional
+export NIO_LLM_DEVICE_ID=nio-llm
+export NIO_LLM_HOMESERVER=https://matrix.org
+export NIO_LLM_GGML_REPOID=TheBloke/stable-vicuna-13B-GGML
+export NIO_LLM_GGML_FILENAME=stable-vicuna-13B.ggmlv3.q5_1.bin
+export NIO_LLM_SYNC_TIMEOUT=30000
+
+nio_llm
+```
+
+
+### Config file
+
+Create a config file with the following content:
+
+```yaml
+# config_file.yaml
+
+# required
+room:
+password:
+username:
+preprompt:
+
+# optional
+device_id: nio-llm
+homeserver: https://matrix.org
+ggml_repoid: TheBloke/stable-vicuna-13B-GGML
+ggml_filename: stable-vicuna-13B.ggmlv3.q5_1.bin
+sync_timeout: 30000
+```
+
+Then run:
+
+```bash
+nio_llm --config config_file.yaml
+```
+
 ## Special thanks
 
 - https://github.com/abetlen/llama-cpp-python