summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Bielik <mx.bielik@gmail.com>2023-03-22 22:14:25 +0100
committerMartin Bielik <mx.bielik@gmail.com>2023-03-22 22:14:25 +0100
commit08036fb76c437b705d916708d903c7c6c2eef0ba (patch)
treef8d1c4f3f652c13c265d2f75e104f3b3907c4551
parent9b707d99b106c6535e476f00095053b470428bcc (diff)
downloadvim-ai-08036fb76c437b705d916708d903c7c6c2eef0ba.tar.gz
completion configuration
-rw-r--r--README.md51
-rw-r--r--doc/vim-ai.txt42
-rw-r--r--plugin/vim-ai.vim20
-rw-r--r--py/chat.py15
-rw-r--r--py/complete.py11
-rw-r--r--py/utils.py9
6 files changed, 124 insertions, 24 deletions
diff --git a/README.md b/README.md
index a2cff90..1009a37 100644
--- a/README.md
+++ b/README.md
@@ -87,6 +87,57 @@ nnoremap <leader>a :AI<CR>
xnoremap <leader>a :AI<CR>
```
+### Completion configuration
+
+Request to the OpenAI API can be configured for each command.
+To customize the default configuration, initialize the config variable with a selection of options. For example:
+
+```vim
+let g:vim_ai_chat = {
+\ "options": {
+\ "model": "gpt-4",
+\ "temperature": 0.2,
+\ },
+\}
+```
+
+Below are listed available options along with default values:
+
+```vim
+" :AI
+" - https://platform.openai.com/docs/api-reference/completions
+let g:vim_ai_complete = {
+\ "options": {
+\ "model": "text-davinci-003",
+\ "max_tokens": 1000,
+\ "temperature": 0.1,
+\ "request_timeout": 10,
+\ },
+\}
+
+" :AIEdit
+" - https://platform.openai.com/docs/api-reference/completions
+let g:vim_ai_edit = {
+\ "options": {
+\ "model": "text-davinci-003",
+\ "max_tokens": 1000,
+\ "temperature": 0.1,
+\ "request_timeout": 10,
+\ },
+\}
+
+" :AIChat
+" - https://platform.openai.com/docs/api-reference/chat
+let g:vim_ai_chat = {
+\ "options": {
+\ "model": "gpt-3.5-turbo",
+\ "max_tokens": 1000,
+\ "temperature": 1,
+\ "request_timeout": 10,
+\ },
+\}
+```
+
### Custom commands
To customize and re-use prompts it is useful to put some context to the language model. You can do it with prepending text to `:AI` command.
diff --git a/doc/vim-ai.txt b/doc/vim-ai.txt
index 9b7f7f7..5657756 100644
--- a/doc/vim-ai.txt
+++ b/doc/vim-ai.txt
@@ -20,6 +20,16 @@ https://github.com/madox2/vim-ai
(selection) :AI complete the selection
(selection) :AI {instruction} complete the selection using the instruction
+Options: >
+ let g:vim_ai_complete = {
+ \ "options": {
+ \ "model": "text-davinci-003",
+ \ "max_tokens": 1000,
+ \ "temperature": 0.1,
+ \ "request_timeout": 10,
+ \ },
+ \}
+
Check OpenAI docs for more information:
https://platform.openai.com/docs/api-reference/completions
@@ -29,6 +39,16 @@ https://platform.openai.com/docs/api-reference/completions
(selection)? :AIEdit {instruction} edit the current line or the selection using
the instruction
+Options: >
+ let g:vim_ai_edit = {
+ \ "options": {
+ \ "model": "text-davinci-003",
+ \ "max_tokens": 1000,
+ \ "temperature": 0.1,
+ \ "request_timeout": 10,
+ \ },
+ \}
+
Check OpenAI docs for more information:
https://platform.openai.com/docs/api-reference/completions
@@ -39,9 +59,31 @@ https://platform.openai.com/docs/api-reference/completions
(selection)? :AIChat {instruction}? start a new conversation given the selection,
the instruction or both
+Options: >
+ let g:vim_ai_chat = {
+ \ "options": {
+ \ "model": "gpt-3.5-turbo",
+ \ "max_tokens": 1000,
+ \ "temperature": 1,
+ \ "request_timeout": 10,
+ \ },
+ \}
+
Check OpenAI docs for more information:
https://platform.openai.com/docs/api-reference/chat
+CONFIGURATION *vim-ai-config*
+
+To customize the default configuration, initialize the config variable with
+a selection of options: >
+
+ let g:vim_ai_chat = {
+ \ "options": {
+ \ "model": "gpt-4",
+ \ "temperature": 0.2,
+ \ },
+ \}
+
ABOUT *vim-ai-about*
Contributions are welcome on GitHub:
diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim
index eeb913a..7570f69 100644
--- a/plugin/vim-ai.vim
+++ b/plugin/vim-ai.vim
@@ -1,4 +1,4 @@
-let g:vim_ai_complete = {
+let g:vim_ai_complete_default = {
\ "options": {
\ "model": "text-davinci-003",
\ "max_tokens": 1000,
@@ -6,7 +6,7 @@ let g:vim_ai_complete = {
\ "request_timeout": 10,
\ },
\}
-let g:vim_ai_edit = {
+let g:vim_ai_edit_default = {
\ "options": {
\ "model": "text-davinci-003",
\ "max_tokens": 1000,
@@ -14,7 +14,7 @@ let g:vim_ai_edit = {
\ "request_timeout": 10,
\ },
\}
-let g:vim_ai_chat = {
+let g:vim_ai_chat_default = {
\ "options": {
\ "model": "gpt-3.5-turbo",
\ "max_tokens": 1000,
@@ -22,6 +22,16 @@ let g:vim_ai_chat = {
\ "request_timeout": 10,
\ },
\}
+if !exists('g:vim_ai_complete')
+ let g:vim_ai_complete = {"options":{}}
+endif
+if !exists('g:vim_ai_edit')
+ let g:vim_ai_edit = {"options":{}}
+endif
+if !exists('g:vim_ai_chat')
+ let g:vim_ai_chat = {"options":{}}
+endif
+
let s:plugin_root = expand('<sfile>:p:h:h')
let s:complete_py = s:plugin_root . "/py/complete.py"
@@ -46,6 +56,7 @@ endfunction
function! AIRun(is_selection, ...) range
let prompt = MakePrompt(a:is_selection, getline(a:firstline, a:lastline), a:0 ? a:1 : "")
+ let options_default = g:vim_ai_complete_default['options']
let options = g:vim_ai_complete['options']
set paste
execute "normal! " . a:lastline . "Go"
@@ -56,7 +67,7 @@ endfunction
function! AIEditRun(is_selection, ...) range
let prompt = MakePrompt(a:is_selection, getline(a:firstline, a:lastline), a:0 ? a:1 : "")
- echo prompt
+ let options_default = g:vim_ai_edit_default['options']
let options = g:vim_ai_edit['options']
set paste
execute "normal! " . a:firstline . "GV" . a:lastline . "Gc"
@@ -77,6 +88,7 @@ function! AIChatRun(is_selection, ...) range
execute "normal i>>> user\n\n" . prompt
endif
+ let options_default = g:vim_ai_chat_default['options']
let options = g:vim_ai_chat['options']
execute "py3file " . s:chat_py
set nopaste
diff --git a/py/chat.py b/py/chat.py
index eb80e92..294ae63 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -4,11 +4,11 @@ import openai
plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
-openai.api_key = load_api_key()
-
-options = vim.eval("options")
+options = make_options()
file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
+openai.api_key = load_api_key()
+
lines = file_content.splitlines()
messages = []
@@ -38,14 +38,7 @@ try:
print('Answering...')
vim.command("redraw")
- response = openai.ChatCompletion.create(
- messages=messages,
- stream=True,
- model=options['model'],
- max_tokens=int(options['max_tokens']),
- temperature=float(options['temperature']),
- request_timeout=float(options['request_timeout']),
- )
+ response = openai.ChatCompletion.create(messages=messages, stream=True, **options)
generating_text = False
for resp in response:
diff --git a/py/complete.py b/py/complete.py
index f469592..f82707a 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -5,7 +5,7 @@ plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
prompt = vim.eval("prompt")
-options = vim.eval("options")
+options = make_options()
openai.api_key = load_api_key()
@@ -15,14 +15,7 @@ try:
print('Completing...')
vim.command("redraw")
- response = openai.Completion.create(
- stream=True,
- prompt=prompt,
- model=options['model'],
- max_tokens=int(options['max_tokens']),
- temperature=float(options['temperature']),
- request_timeout=float(options['request_timeout']),
- )
+ response = openai.Completion.create(stream=True, prompt=prompt, **options)
generating_text = False
for resp in response:
diff --git a/py/utils.py b/py/utils.py
index c9e88f2..8366eec 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -10,3 +10,12 @@ def load_api_key():
except Exception:
pass
return api_key.strip()
+
+def make_options():
+ options_default = vim.eval("options_default")
+ options_user = vim.eval("options")
+ options = {**options_default, **options_user}
+ options['request_timeout'] = float(options['request_timeout'])
+ options['temperature'] = float(options['temperature'])
+ options['max_tokens'] = int(options['max_tokens'])
+ return options