-rw-r--r--  plugin/vim-ai.vim    28
-rw-r--r--  py/chat.py            7
-rw-r--r--  py/complete.py       11
-rw-r--r--  py/utils.py           2
4 files changed, 39 insertions, 9 deletions
diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim
index 9388623..b24c3ed 100644
--- a/plugin/vim-ai.vim
+++ b/plugin/vim-ai.vim
@@ -1,3 +1,28 @@
+let g:vim_ai_complete = {
+\  "options": {
+\    "model": "text-davinci-003",
+\    "max_tokens": 1000,
+\    "temperature": 0.1,
+\    "request_timeout": 10,
+\  },
+\}
+let g:vim_ai_edit = {
+\  "options": {
+\    "model": "text-davinci-003",
+\    "max_tokens": 1000,
+\    "temperature": 0.1,
+\    "request_timeout": 10,
+\  },
+\}
+let g:vim_ai_chat = {
+\  "options": {
+\    "model": "gpt-3.5-turbo",
+\    "max_tokens": 1000,
+\    "temperature": 1,
+\    "request_timeout": 10,
+\  },
+\}
+
let s:plugin_root = expand('<sfile>:p:h:h')
let s:complete_py = s:plugin_root . "/py/complete.py"
let s:chat_py = s:plugin_root . "/py/chat.py"
@@ -22,6 +47,7 @@ endfunction
function! AIRun(...) range
let prompt = MakePrompt(getline(a:firstline, a:lastline), a:0 ? a:1 : "")
+ let options = g:vim_ai_complete['options']
set paste
execute "normal! " . a:lastline . "Go"
execute "py3file " . s:complete_py
@@ -31,6 +57,7 @@ endfunction
function! AIEditRun(...) range
let prompt = MakePrompt(getline(a:firstline, a:lastline), a:0 ? a:1 : "")
+ let options = g:vim_ai_edit['options']
set paste
execute "normal! " . a:firstline . "GV" . a:lastline . "Gc"
execute "py3file " . s:complete_py
@@ -47,6 +74,7 @@ function! AIChatRun(...) range
execute "normal i>>> user\n\n" . prompt
endif
+ let options = g:vim_ai_chat['options']
execute "py3file " . s:chat_py
set nopaste
endfunction
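Note on the options plumbing above: each command now looks up its dictionary (g:vim_ai_complete, g:vim_ai_edit, g:vim_ai_chat) and exposes it to the Python scripts as a local named options, which the scripts read back with vim.eval("options"). Vim's Python bridge renders numbers in the evaluated dictionary as strings, which is why the hunks below cast max_tokens, temperature and request_timeout with int() and float(). A minimal sketch of that boundary, using a hypothetical normalize_options() helper that is not part of this change:

import vim

def normalize_options(raw):
    # vim.eval() hands back Vim numbers as Python strings, so cast them
    # before passing them to the OpenAI client.
    return {
        "model": raw["model"],
        "max_tokens": int(raw["max_tokens"]),
        "temperature": float(raw["temperature"]),
        "request_timeout": float(raw["request_timeout"]),
    }

options = normalize_options(vim.eval("options"))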
diff --git a/py/chat.py b/py/chat.py
index 7cd7985..eb80e92 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -6,6 +6,7 @@ vim.command(f"py3file {plugin_root}/py/utils.py")
openai.api_key = load_api_key()
+options = vim.eval("options")
file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
lines = file_content.splitlines()
@@ -38,10 +39,12 @@ try:
vim.command("redraw")
response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
messages=messages,
stream=True,
- request_timeout=request_timeout_seconds,
+ model=options['model'],
+ max_tokens=int(options['max_tokens']),
+ temperature=float(options['temperature']),
+ request_timeout=float(options['request_timeout']),
)
generating_text = False
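For context, the streamed response created above is consumed chunk by chunk further down in chat.py (the generating_text flag guards that loop). An illustrative loop for the openai 0.x client, not the plugin's exact code, picking up the response object from the call above; chat chunks carry partial text in choices[0]['delta']:

import vim

generating_text = False
for chunk in response:
    # Streamed ChatCompletion chunks expose incremental text in a delta;
    # chunks without content (e.g. the initial role announcement) are skipped.
    text = chunk["choices"][0]["delta"].get("content", "")
    if not text:
        continue
    generating_text = True
    vim.command("normal! a" + text)  # sketch; real code must handle newlines/escaping
    vim.command("redraw")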
diff --git a/py/complete.py b/py/complete.py
index d47eb2a..f469592 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -5,6 +5,7 @@ plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
prompt = vim.eval("prompt")
+options = vim.eval("options")
openai.api_key = load_api_key()
@@ -15,12 +16,12 @@ try:
vim.command("redraw")
response = openai.Completion.create(
- model="text-davinci-003",
- prompt=prompt,
- max_tokens=1000,
- temperature=0.1,
stream=True,
- request_timeout=request_timeout_seconds,
+ prompt=prompt,
+ model=options['model'],
+ max_tokens=int(options['max_tokens']),
+ temperature=float(options['temperature']),
+ request_timeout=float(options['request_timeout']),
)
generating_text = False
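The complete.py change mirrors the chat.py one; the only difference on the consuming side is that Completion chunks expose new text directly in choices[0]['text'] rather than inside a delta. Illustrative sketch only, again assuming the response object from the call above and the openai 0.x client:

for chunk in response:
    # Completion streaming chunks carry the generated text directly.
    text = chunk["choices"][0].get("text", "")
    if text:
        vim.command("normal! a" + text)  # sketch; real code must handle escaping
        vim.command("redraw")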
diff --git a/py/utils.py b/py/utils.py
index 05da3b9..c9e88f2 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -1,8 +1,6 @@
import sys
import os
-request_timeout_seconds = 15
-
def load_api_key():
config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
api_key = os.getenv("OPENAI_API_KEY")
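utils.py now only hosts load_api_key(); the module-level request_timeout_seconds constant is gone because the timeout travels in the per-command options. The remainder of load_api_key() is outside this hunk; a plausible sketch of the visible pattern (environment variable first, token file as fallback) might look like:

import os

def load_api_key():
    # Prefer the OPENAI_API_KEY environment variable; otherwise fall back to
    # ~/.config/openai.token. The actual fallback logic is not shown in this hunk.
    config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key and os.path.exists(config_file_path):
        with open(config_file_path) as f:
            api_key = f.read().strip()
    return api_key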