From 356ead8aa66e939d78bf38b4e5515545bbdf5a91 Mon Sep 17 00:00:00 2001
From: juodumas
Date: Mon, 18 Sep 2023 13:59:51 +0300
Subject: Add support for base_url option to use local models

For example, you can start llama-cpp-python like this (it emulates the
OpenAI API):

```sh
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install 'llama-cpp-python[server]'
wget https://huggingface.co/TheBloke/CodeLlama-13B-Instruct-GGUF/resolve/main/codellama-13b-instruct.Q5_K_M.gguf
python3 -m llama_cpp.server --n_gpu_layers 100 --model codellama-13b-instruct.Q5_K_M.gguf
```

Then set the API URL in your `.vimrc`:

```vim
let g:vim_ai_chat = {
\  "engine": "chat",
\  "options": {
\    "base_url": "http://127.0.0.1:8000",
\  },
\}
```

And chat with the locally hosted AI using `:AIChat`.

The change in utils.py was needed because llama-cpp-python appends a
carriage return to the final response: `[DONE]^M`.
---
 py/complete.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'py/complete.py')

diff --git a/py/complete.py b/py/complete.py
index c8d45fe..a5c4711 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -1,3 +1,4 @@
+from urllib.parse import urljoin
 # import utils
 plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
@@ -17,7 +18,9 @@ def complete_engine(prompt):
         **openai_options
     }
     printDebug("[engine-complete] request: {}", request)
-    response = openai_request('https://api.openai.com/v1/completions', request, http_options)
+    base_url = config_options.get('base_url', 'https://api.openai.com')
+    url = urljoin(base_url, 'v1/completions')
+    response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-complete] response: {}", resp)
         return resp['choices'][0].get('text', '')
@@ -35,7 +38,9 @@ def chat_engine(prompt):
         **openai_options
     }
     printDebug("[engine-chat] request: {}", request)
-    response = openai_request('https://api.openai.com/v1/chat/completions', request, http_options)
+    base_url = config_options.get('base_url', 'https://api.openai.com')
+    url = urljoin(base_url, 'v1/chat/completions')
+    response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-chat] response: {}", resp)
         return resp['choices'][0]['delta'].get('content', '')
--
cgit v1.2.3


From bd0e7668f6709b8fc9cac79e42ccecafde949aff Mon Sep 17 00:00:00 2001
From: Martin Bielik
Date: Sat, 21 Oct 2023 12:30:41 +0200
Subject: base_url extracted to config, docu

---
 py/complete.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'py/complete.py')

diff --git a/py/complete.py b/py/complete.py
index a5c4711..668591d 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -18,8 +18,7 @@ def complete_engine(prompt):
         **openai_options
     }
     printDebug("[engine-complete] request: {}", request)
-    base_url = config_options.get('base_url', 'https://api.openai.com')
-    url = urljoin(base_url, 'v1/completions')
+    url = urljoin(config_options['base_url'], 'v1/completions')
     response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-complete] response: {}", resp)
@@ -38,8 +37,7 @@ def chat_engine(prompt):
         **openai_options
     }
     printDebug("[engine-chat] request: {}", request)
-    base_url = config_options.get('base_url', 'https://api.openai.com')
-    url = urljoin(base_url, 'v1/chat/completions')
+    url = urljoin(config_options['base_url'], 'v1/chat/completions')
     response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-chat] response: {}", resp)
--
cgit v1.2.3
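
A note on the `urljoin` usage above: Python resolves the second argument as a
relative reference, so a `base_url` whose path lacks a trailing slash loses
its last path segment. A minimal sketch in plain Python; the `/proxy` prefix
is a made-up path for illustration only:

```python
from urllib.parse import urljoin

# Bare host: the API path is appended as expected.
print(urljoin('http://127.0.0.1:8000', 'v1/completions'))
# http://127.0.0.1:8000/v1/completions

# No trailing slash: the last segment is replaced, so 'proxy' is dropped.
print(urljoin('http://127.0.0.1:8000/proxy', 'v1/completions'))
# http://127.0.0.1:8000/v1/completions

# Trailing slash: the prefix survives.
print(urljoin('http://127.0.0.1:8000/proxy/', 'v1/completions'))
# http://127.0.0.1:8000/proxy/v1/completions
```

This edge case may be one reason the next commit drops the join and reads a
single explicit `endpoint_url` from the config instead.
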
From d9e1e193b6d8a8d2eb4eb2deb64d774ab5d5079b Mon Sep 17 00:00:00 2001
From: Martin Bielik
Date: Sat, 21 Oct 2023 18:29:55 +0200
Subject: endpoint_url config

---
 py/complete.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'py/complete.py')

diff --git a/py/complete.py b/py/complete.py
index 668591d..4b07271 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -18,7 +18,7 @@ def complete_engine(prompt):
         **openai_options
     }
     printDebug("[engine-complete] request: {}", request)
-    url = urljoin(config_options['base_url'], 'v1/completions')
+    url = config_options['endpoint_url']
     response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-complete] response: {}", resp)
@@ -37,7 +37,7 @@ def chat_engine(prompt):
         **openai_options
     }
     printDebug("[engine-chat] request: {}", request)
-    url = urljoin(config_options['base_url'], 'v1/chat/completions')
+    url = config_options['endpoint_url']
     response = openai_request(url, request, http_options)
     def map_chunk(resp):
         printDebug("[engine-chat] response: {}", resp)
--
cgit v1.2.3


From 55c4e2ec836e48552b52fb4b7878f7b50f67b53b Mon Sep 17 00:00:00 2001
From: Martin Bielik
Date: Sat, 21 Oct 2023 19:02:54 +0200
Subject: removed unused import

---
 py/complete.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'py/complete.py')

diff --git a/py/complete.py b/py/complete.py
index 4b07271..8386c09 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -1,4 +1,3 @@
-from urllib.parse import urljoin
 # import utils
 plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
--
cgit v1.2.3
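
With the series applied, the full endpoint URL goes into the plugin options,
e.g. `"endpoint_url": "http://127.0.0.1:8000/v1/chat/completions"` for the
llama-cpp-python setup from the first commit message. The utils.py change
that tolerates the trailing `[DONE]^M` is not shown here, since this log is
limited to py/complete.py; below is a minimal sketch of what such handling
could look like. The helper name and the line-based stream shape are
assumptions for illustration, not the plugin's actual code.

```python
import json

def iter_chat_deltas(sse_lines):
    # Hypothetical helper, not the plugin's actual utils.py change.
    # llama-cpp-python can terminate the stream with 'data: [DONE]\r'
    # (the '[DONE]^M' from the commit message), so strip trailing
    # whitespace before comparing against the sentinel.
    for raw in sse_lines:
        line = raw.strip()
        if not line.startswith('data:'):
            continue  # ignore SSE comments and keep-alive blank lines
        payload = line[len('data:'):].strip()
        if payload == '[DONE]':
            return  # end-of-stream sentinel, with or without the '\r'
        chunk = json.loads(payload)
        yield chunk['choices'][0]['delta'].get('content', '')
```

For example, `''.join(iter_chat_deltas(body.splitlines()))` would collect a
whole streamed reply into one string.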