summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Bielik <mx.bielik@gmail.com>2023-10-21 18:29:55 +0200
committerMartin Bielik <mx.bielik@gmail.com>2023-10-21 18:29:55 +0200
commitd9e1e193b6d8a8d2eb4eb2deb64d774ab5d5079b (patch)
treedb9ff9c759ea535e8d49d6bd1f469535ae8498c3
parentdca2bcf256df92196febf9bae77206bb6e51dac1 (diff)
downloadvim-ai-d9e1e193b6d8a8d2eb4eb2deb64d774ab5d5079b.tar.gz
endpoint_url config
Diffstat (limited to '')
-rw-r--r--README.md10
-rw-r--r--autoload/vim_ai_config.vim6
-rw-r--r--doc/vim-ai.txt6
-rw-r--r--py/chat.py2
-rw-r--r--py/complete.py4
5 files changed, 13 insertions, 15 deletions
diff --git a/README.md b/README.md
index 6439b49..d9d7a29 100644
--- a/README.md
+++ b/README.md
@@ -206,7 +206,6 @@ Below are listed all available configuration options, along with their default v
" - engine: complete | chat - see how to configure chat engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.request_timeout: request timeout in seconds
-" - options.base_url: openai endpoint url
" - options.enable_auth: enable authorization using openai key
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
@@ -214,10 +213,10 @@ let g:vim_ai_complete = {
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+\ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -230,7 +229,6 @@ let g:vim_ai_complete = {
" - engine: complete | chat - see how to configure chat engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.request_timeout: request timeout in seconds
-" - options.base_url: openai endpoint url
" - options.enable_auth: enable authorization using openai key
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
@@ -238,10 +236,10 @@ let g:vim_ai_edit = {
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+\ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -262,7 +260,6 @@ END
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.request_timeout: request timeout in seconds
-" - options.base_url: openai endpoint url
" - options.enable_auth: enable authorization using openai key
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.populate_options: put [chat-options] to the chat header
@@ -272,10 +269,10 @@ END
let g:vim_ai_chat = {
\ "options": {
\ "model": "gpt-3.5-turbo",
+\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
\ "max_tokens": 1000,
\ "temperature": 1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
@@ -323,6 +320,7 @@ let chat_engine_config = {
\ "engine": "chat",
\ "options": {
\ "model": "gpt-3.5-turbo",
+\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
diff --git a/autoload/vim_ai_config.vim b/autoload/vim_ai_config.vim
index 4aadc5a..54645ed 100644
--- a/autoload/vim_ai_config.vim
+++ b/autoload/vim_ai_config.vim
@@ -2,10 +2,10 @@ let g:vim_ai_complete_default = {
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+\ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -17,10 +17,10 @@ let g:vim_ai_edit_default = {
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+\ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -38,10 +38,10 @@ END
let g:vim_ai_chat_default = {
\ "options": {
\ "model": "gpt-3.5-turbo",
+\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
\ "max_tokens": 1000,
\ "temperature": 1,
\ "request_timeout": 20,
-\ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
diff --git a/doc/vim-ai.txt b/doc/vim-ai.txt
index fa044f9..85ca5ff 100644
--- a/doc/vim-ai.txt
+++ b/doc/vim-ai.txt
@@ -25,10 +25,10 @@ Options: >
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+ \ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
- \ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -51,10 +51,10 @@ Options: >
\ "engine": "complete",
\ "options": {
\ "model": "text-davinci-003",
+ \ "endpoint_url": "https://api.openai.com/v1/completions",
\ "max_tokens": 1000,
\ "temperature": 0.1,
\ "request_timeout": 20,
- \ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ },
@@ -85,9 +85,9 @@ Options: >
\ "options": {
\ "model": "gpt-3.5-turbo",
\ "max_tokens": 1000,
+ \ "endpoint_url": "https://api.openai.com/v1/chat/completions",
\ "temperature": 1,
\ "request_timeout": 20,
- \ "base_url": "https://api.openai.com",
\ "enable_auth": 1,
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_chat_prompt,
diff --git a/py/chat.py b/py/chat.py
index 8374efb..71783ca 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -70,7 +70,7 @@ try:
**openai_options
}
printDebug("[chat] request: {}", request)
- url = urljoin(config_options['base_url'], 'v1/chat/completions')
+ url = config_options['endpoint_url']
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[chat] response: {}", resp)
diff --git a/py/complete.py b/py/complete.py
index 668591d..4b07271 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -18,7 +18,7 @@ def complete_engine(prompt):
**openai_options
}
printDebug("[engine-complete] request: {}", request)
- url = urljoin(config_options['base_url'], 'v1/completions')
+ url = config_options['endpoint_url']
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[engine-complete] response: {}", resp)
@@ -37,7 +37,7 @@ def chat_engine(prompt):
**openai_options
}
printDebug("[engine-chat] request: {}", request)
- url = urljoin(config_options['base_url'], 'v1/chat/completions')
+ url = config_options['endpoint_url']
response = openai_request(url, request, http_options)
def map_chunk(resp):
printDebug("[engine-chat] response: {}", resp)