-rw-r--r--  plugin/vim-ai.vim   8
-rw-r--r--  py/chat.py         13
-rw-r--r--  py/complete.py     26
-rw-r--r--  py/utils.py        10
4 files changed, 51 insertions, 6 deletions
diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim
index 972e04f..6eb563a 100644
--- a/plugin/vim-ai.vim
+++ b/plugin/vim-ai.vim
@@ -38,6 +38,14 @@ let g:vim_ai_chat_default = {
 \ },
 \}
 
+if !exists("g:vim_ai_debug")
+  let g:vim_ai_debug = 0
+endif
+
+if !exists("g:vim_ai_debug_log_file")
+  let g:vim_ai_debug_log_file = "/tmp/vim_ai_debug.log"
+endif
+
 function! s:ExtendDeep(defaults, override) abort
   let l:result = a:defaults
   for [l:key, l:value] in items(a:override)
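
The exists() guards above keep both options user-overridable, so enabling the new logging from a vimrc takes two lines; a minimal sketch (the path shown is just the default from this hunk):

    let g:vim_ai_debug = 1
    let g:vim_ai_debug_log_file = "/tmp/vim_ai_debug.log"
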
diff --git a/py/chat.py b/py/chat.py
index 0e3245f..6e94665 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -74,8 +74,17 @@ try:
     print('Answering...')
     vim.command("redraw")
 
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response)
+    request = {
+        'stream': True,
+        'messages': messages,
+        **request_options
+    }
+    printDebug("[chat] request: {}", request)
+    response = openai.ChatCompletion.create(**request)
+    def map_chunk(resp):
+        printDebug("[chat] response: {}", resp)
+        return resp['choices'][0]['delta'].get('content', '')
+    text_chunks = map(map_chunk, response)
     render_text_chunks(text_chunks)
 
     vim.command("normal! a\n\n>>> user\n\n")
diff --git a/py/complete.py b/py/complete.py
index b0b0dc5..eea2add 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -13,8 +13,17 @@ prompt = vim.eval("prompt").strip()
 openai.api_key = load_api_key()
 
 def complete_engine(prompt):
-    response = openai.Completion.create(stream=True, prompt=prompt, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0].get('text', ''), response)
+    request = {
+        'stream': True,
+        'prompt': prompt,
+        **request_options
+    }
+    printDebug("[engine-complete] request: {}", request)
+    response = openai.Completion.create(**request)
+    def map_chunk(resp):
+        printDebug("[engine-complete] response: {}", resp)
+        return resp['choices'][0].get('text', '')
+    text_chunks = map(map_chunk, response)
     return text_chunks
 
 def chat_engine(prompt):
@@ -22,8 +31,17 @@ def chat_engine(prompt):
     initial_prompt = '\n'.join(initial_prompt)
     chat_content = f"{initial_prompt}\n\n>>> user\n\n{prompt}".strip()
     messages = parse_chat_messages(chat_content)
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response)
+    request = {
+        'stream': True,
+        'messages': messages,
+        **request_options
+    }
+    printDebug("[engine-chat] request: {}", request)
+    response = openai.ChatCompletion.create(**request)
+    def map_chunk(resp):
+        printDebug("[engine-chat] response: {}", resp)
+        return resp['choices'][0]['delta'].get('content', '')
+    text_chunks = map(map_chunk, response)
     return text_chunks
 
 engines = {"chat": chat_engine, "complete": complete_engine}
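
The two engines now differ only in the endpoint and in where the streamed text lives: Completion chunks expose it at choices[0]['text'], while ChatCompletion chunks nest it in choices[0]['delta']['content']. A minimal sketch of both access paths, with invented chunk values:

    # Invented example chunks illustrating the two extraction paths above.
    completion_chunk = {"choices": [{"text": " world"}]}
    chat_chunk = {"choices": [{"delta": {"content": " world"}}]}

    assert completion_chunk["choices"][0].get("text", "") == " world"
    assert chat_chunk["choices"][0]["delta"].get("content", "") == " world"
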
diff --git a/py/utils.py b/py/utils.py
index 2291a26..5e560b3 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -1,6 +1,10 @@
+import datetime
 import sys
 import os
 
+is_debugging = vim.bindeval("g:vim_ai_debug") == 1
+debug_log_file = vim.bindeval("g:vim_ai_debug_log_file")
+
 def load_api_key():
     config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
     api_key = os.getenv("OPENAI_API_KEY")
@@ -56,3 +60,9 @@ def parse_chat_messages(chat_content):
 def vim_break_undo_sequence():
     # breaks undo sequence (https://vi.stackexchange.com/a/29087)
     vim.command("let &ul=&ul")
+
+def printDebug(text, *args):
+    if not is_debugging:
+        return
+    with open(debug_log_file, "a") as file:
+        file.write(f"[{datetime.datetime.now()}] " + text.format(*args) + "\n")
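
Taken together with the call sites above, a printDebug call is cheap when logging is off (the early return skips both str.format and the file I/O) and appends one timestamped line per call when it is on. Illustratively, given the printDebug defined above (the request dict and log line here are invented):

    # Hypothetical call mirroring the hunks above; 'request' stands in
    # for the dict built at the call sites.
    request = {'stream': True, 'model': 'gpt-3.5-turbo'}
    printDebug("[chat] request: {}", request)
    # With g:vim_ai_debug enabled, appends to the log file a line like:
    # [2023-04-01 12:00:00.000000] [chat] request: {'stream': True, ...}
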