summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md7
-rw-r--r--autoload/vim_ai_config.vim3
-rw-r--r--doc/vim-ai.txt3
-rw-r--r--py/chat.py3
-rw-r--r--py/complete.py6
5 files changed, 16 insertions, 6 deletions
diff --git a/README.md b/README.md
index 1ff7973..76996b1 100644
--- a/README.md
+++ b/README.md
@@ -206,6 +206,7 @@ Below are listed all available configuration options, along with their default v
" - engine: complete | chat - see how to configure chat engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.request_timeout: request timeout in seconds
+" - options.base_url: openai endpoint url
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
let g:vim_ai_complete = {
@@ -215,6 +216,7 @@ let g:vim_ai_complete = {
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -226,6 +228,7 @@ let g:vim_ai_complete = {
" - engine: complete | chat - see how to configure chat engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.request_timeout: request timeout in seconds
+" - options.base_url: openai endpoint url
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
let g:vim_ai_edit = {
@@ -235,6 +238,7 @@ let g:vim_ai_edit = {
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -254,6 +258,7 @@ END
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.request_timeout: request timeout in seconds
+" - options.base_url: openai endpoint url
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.populate_options: put [chat-options] to the chat header
" - ui.open_chat_command: preset (preset_below, preset_tab, preset_right) or a custom command
@@ -265,6 +270,7 @@ let g:vim_ai_chat = {
\ "max_tokens": 1000,
\ "temperature": 1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
\ },
@@ -314,6 +320,7 @@ let chat_engine_config = {
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "",
\ "initial_prompt": initial_prompt,
\ },
diff --git a/autoload/vim_ai_config.vim b/autoload/vim_ai_config.vim
index c501bbd..5095528 100644
--- a/autoload/vim_ai_config.vim
+++ b/autoload/vim_ai_config.vim
@@ -5,6 +5,7 @@ let g:vim_ai_complete_default = {
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -18,6 +19,7 @@ let g:vim_ai_edit_default = {
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -37,6 +39,7 @@ let g:vim_ai_chat_default = {
\ "max_tokens": 1000,
\ "temperature": 1,
\ "request_timeout": 20,
+\ "base_url": "https://api.openai.com",
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
\ },
diff --git a/doc/vim-ai.txt b/doc/vim-ai.txt
index 931d018..be9df1e 100644
--- a/doc/vim-ai.txt
+++ b/doc/vim-ai.txt
@@ -28,6 +28,7 @@ Options: >
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+ \ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -52,6 +53,7 @@ Options: >
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
+ \ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ },
\ "ui": {
@@ -83,6 +85,7 @@ Options: >
\ "max_tokens": 1000,
\ "temperature": 1,
\ "request_timeout": 20,
+ \ "base_url": "https://api.openai.com",
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_chat_prompt,
\ },
diff --git a/py/chat.py b/py/chat.py
index 7bca31d..8374efb 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -70,8 +70,7 @@ try:
**openai_options
}
printDebug("[chat] request: {}", request)
- base_url = options.get('base_url', 'https://api.openai.com')
- url = urljoin(base_url, 'v1/chat/completions')
+ url = urljoin(config_options['base_url'], 'v1/chat/completions')
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[chat] response: {}", resp)
diff --git a/py/complete.py b/py/complete.py
index a5c4711..668591d 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -18,8 +18,7 @@ def complete_engine(prompt):
**openai_options
}
printDebug("[engine-complete] request: {}", request)
- base_url = config_options.get('base_url', 'https://api.openai.com')
- url = urljoin(base_url, 'v1/completions')
+ url = urljoin(config_options['base_url'], 'v1/completions')
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[engine-complete] response: {}", resp)
@@ -38,8 +37,7 @@ def chat_engine(prompt):
**openai_options
}
printDebug("[engine-chat] request: {}", request)
- base_url = config_options.get('base_url', 'https://api.openai.com')
- url = urljoin(base_url, 'v1/chat/completions')
+ url = urljoin(config_options['base_url'], 'v1/chat/completions')
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[engine-chat] response: {}", resp)