diff options
Diffstat (limited to '')
| -rw-r--r-- | README.md | 24 | ||||
| -rw-r--r-- | autoload/vim_ai_config.vim | 6 | ||||
| -rw-r--r-- | doc/vim-ai.txt | 6 | ||||
| -rw-r--r-- | py/chat.py | 3 | ||||
| -rw-r--r-- | py/complete.py | 6 | ||||
| -rw-r--r-- | py/utils.py | 20 |
6 files changed, 56 insertions, 9 deletions
@@ -210,15 +210,18 @@ Below are listed all available configuration options, along with their default v " - engine: complete | chat - see how to configure chat engine in the section below " - options: openai config (see https://platform.openai.com/docs/api-reference/completions) " - options.request_timeout: request timeout in seconds +" - options.enable_auth: enable authorization using openai key " - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20) " - ui.paste_mode: use paste mode (see more info in the Notes below) let g:vim_ai_complete = { \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", +\ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -230,15 +233,18 @@ let g:vim_ai_complete = { " - engine: complete | chat - see how to configure chat engine in the section below " - options: openai config (see https://platform.openai.com/docs/api-reference/completions) " - options.request_timeout: request timeout in seconds +" - options.enable_auth: enable authorization using openai key " - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20) " - ui.paste_mode: use paste mode (see more info in the Notes below) let g:vim_ai_edit = { \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", +\ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -258,6 +264,7 @@ END " - options: openai config (see https://platform.openai.com/docs/api-reference/chat) " - options.initial_prompt: prompt prepended to every chat request (list of lines or string) " - options.request_timeout: request timeout in seconds +" - options.enable_auth: enable authorization using openai key " - 
options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20) " - ui.populate_options: put [chat-options] to the chat header " - ui.open_chat_command: preset (preset_below, preset_tab, preset_right) or a custom command @@ -266,9 +273,11 @@ END let g:vim_ai_chat = { \ "options": { \ "model": "gpt-3.5-turbo", +\ "endpoint_url": "https://api.openai.com/v1/chat/completions", \ "max_tokens": 1000, \ "temperature": 1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "", \ "initial_prompt": s:initial_chat_prompt, \ }, @@ -293,6 +302,20 @@ let g:vim_ai_chat = { " hits token limit, which respond with `OpenAI: HTTPError 400` ``` +### Using custom API + +It is possible to configure the plugin to use different OpenAI-compatible endpoints. +See some cool projects listed in [Custom APIs](https://github.com/madox2/vim-ai/wiki/Custom-APIs) section on the [Community Wiki](https://github.com/madox2/vim-ai/wiki). + +```vim +let g:vim_ai_chat = { +\ "options": { +\ "endpoint_url": "http://localhost:8000/v1/chat/completions", +\ "enable_auth": 0, +\ }, +\} +``` + ### Using chat engine for completion and edits It is possible to configure chat models, such as `gpt-3.5-turbo`, to be used in `:AI` and `:AIEdit` commands. 
@@ -315,6 +338,7 @@ let chat_engine_config = { \ "engine": "chat", \ "options": { \ "model": "gpt-3.5-turbo", +\ "endpoint_url": "https://api.openai.com/v1/chat/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, diff --git a/autoload/vim_ai_config.vim b/autoload/vim_ai_config.vim index d60475d..6119e4f 100644 --- a/autoload/vim_ai_config.vim +++ b/autoload/vim_ai_config.vim @@ -2,9 +2,11 @@ let g:vim_ai_complete_default = { \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", +\ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -15,9 +17,11 @@ let g:vim_ai_edit_default = { \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", +\ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -34,9 +38,11 @@ END let g:vim_ai_chat_default = { \ "options": { \ "model": "gpt-3.5-turbo", +\ "endpoint_url": "https://api.openai.com/v1/chat/completions", \ "max_tokens": 1000, \ "temperature": 1, \ "request_timeout": 20, +\ "enable_auth": 1, \ "selection_boundary": "", \ "initial_prompt": s:initial_chat_prompt, \ }, diff --git a/doc/vim-ai.txt b/doc/vim-ai.txt index 86965ee..f5f2cde 100644 --- a/doc/vim-ai.txt +++ b/doc/vim-ai.txt @@ -25,9 +25,11 @@ Options: > \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", + \ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ "temperature": 0.1, \ "request_timeout": 20, + \ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -49,9 +51,11 @@ Options: > \ "engine": "complete", \ "options": { \ "model": "gpt-3.5-turbo-instruct", + \ "endpoint_url": "https://api.openai.com/v1/completions", \ "max_tokens": 1000, \ 
"temperature": 0.1, \ "request_timeout": 20, + \ "enable_auth": 1, \ "selection_boundary": "#####", \ }, \ "ui": { @@ -81,8 +85,10 @@ Options: > \ "options": { \ "model": "gpt-3.5-turbo", \ "max_tokens": 1000, + \ "endpoint_url": "https://api.openai.com/v1/chat/completions", \ "temperature": 1, \ "request_timeout": 20, + \ "enable_auth": 1, \ "selection_boundary": "#####", \ "initial_prompt": s:initial_chat_prompt, \ }, @@ -69,7 +69,8 @@ try: **openai_options } printDebug("[chat] request: {}", request) - response = openai_request('https://api.openai.com/v1/chat/completions', request, http_options) + url = config_options['endpoint_url'] + response = openai_request(url, request, http_options) def map_chunk(resp): printDebug("[chat] response: {}", resp) return resp['choices'][0]['delta'].get('content', '') diff --git a/py/complete.py b/py/complete.py index c8d45fe..8386c09 100644 --- a/py/complete.py +++ b/py/complete.py @@ -17,7 +17,8 @@ def complete_engine(prompt): **openai_options } printDebug("[engine-complete] request: {}", request) - response = openai_request('https://api.openai.com/v1/completions', request, http_options) + url = config_options['endpoint_url'] + response = openai_request(url, request, http_options) def map_chunk(resp): printDebug("[engine-complete] response: {}", resp) return resp['choices'][0].get('text', '') @@ -35,7 +36,8 @@ def chat_engine(prompt): **openai_options } printDebug("[engine-chat] request: {}", request) - response = openai_request('https://api.openai.com/v1/chat/completions', request, http_options) + url = config_options['endpoint_url'] + response = openai_request(url, request, http_options) def map_chunk(resp): printDebug("[engine-chat] response: {}", resp) return resp['choices'][0]['delta'].get('content', '') diff --git a/py/utils.py b/py/utils.py index 2e1f975..e5203bd 100644 --- a/py/utils.py +++ b/py/utils.py @@ -14,6 +14,9 @@ import traceback is_debugging = vim.eval("g:vim_ai_debug") == "1" debug_log_file = 
vim.eval("g:vim_ai_debug_log_file") +class KnownError(Exception): + pass + def load_api_key(): config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token") api_key_param_value = os.getenv("OPENAI_API_KEY") @@ -24,7 +27,7 @@ def load_api_key(): pass if not api_key_param_value: - raise Exception("Missing OpenAI API key") + raise KnownError("Missing OpenAI API key") # The text is in format of "<api key>,<org id>" and the # <org id> part is optional @@ -56,6 +59,7 @@ def make_openai_options(options): def make_http_options(options): return { 'request_timeout': float(options['request_timeout']), + 'enable_auth': bool(int(options['enable_auth'])), } def render_text_chunks(chunks): @@ -130,16 +134,18 @@ def printDebug(text, *args): OPENAI_RESP_DATA_PREFIX = 'data: ' OPENAI_RESP_DONE = '[DONE]' -(OPENAI_API_KEY, OPENAI_ORG_ID) = load_api_key() def openai_request(url, data, options): + enable_auth=options['enable_auth'] headers = { "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" } + if enable_auth: + (OPENAI_API_KEY, OPENAI_ORG_ID) = load_api_key() + headers['Authorization'] = f"Bearer {OPENAI_API_KEY}" - if OPENAI_ORG_ID is not None: - headers["OpenAI-Organization"] = f"{OPENAI_ORG_ID}" + if OPENAI_ORG_ID is not None: + headers["OpenAI-Organization"] = f"{OPENAI_ORG_ID}" request_timeout=options['request_timeout'] req = urllib.request.Request( @@ -153,7 +159,7 @@ def openai_request(url, data, options): line = line_bytes.decode("utf-8", errors="replace") if line.startswith(OPENAI_RESP_DATA_PREFIX): line_data = line[len(OPENAI_RESP_DATA_PREFIX):-1] - if line_data == OPENAI_RESP_DONE: + if line_data.strip() == OPENAI_RESP_DONE: pass else: openai_obj = json.loads(line_data) @@ -183,6 +189,8 @@ def handle_completion_error(error): elif status_code == 429: msg += ' (Hint: verify that your billing plan is "Pay as you go")' print_info_message(msg) + elif isinstance(error, KnownError): + print_info_message(str(error)) else: raise 
error |