| author | Martin Bielik <mx.bielik@gmail.com> | 2023-04-11 17:30:16 +0200 |
|---|---|---|
| committer | Martin Bielik <mx.bielik@gmail.com> | 2023-04-11 17:30:16 +0200 |
| commit | 2d644e05545be6cd699eb5e834d6ba4468f12b41 | |
| tree | 0e74bccadde6e2e8555cfcccf90af3357083df94 | |
| parent | 8c97b5cfde56540c8e6619c8d56dc7056d49866f | |
| download | vim-ai-2d644e05545be6cd699eb5e834d6ba4468f12b41.tar.gz | |
added debug logging
Diffstat (limited to 'py')
| -rw-r--r-- | py/chat.py | 13 |
| -rw-r--r-- | py/complete.py | 26 |
| -rw-r--r-- | py/utils.py | 10 |
3 files changed, 43 insertions, 6 deletions
```diff
diff --git a/py/chat.py b/py/chat.py
--- a/py/chat.py
+++ b/py/chat.py
@@ -74,8 +74,17 @@ try:
     print('Answering...')
     vim.command("redraw")
 
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response)
+    request = {
+        'stream': True,
+        'messages': messages,
+        **request_options
+    }
+    printDebug("[chat] request: {}", request)
+    response = openai.ChatCompletion.create(**request)
+    def map_chunk(resp):
+        printDebug("[chat] response: {}", resp)
+        return resp['choices'][0]['delta'].get('content', '')
+    text_chunks = map(map_chunk, response)
     render_text_chunks(text_chunks)
 
     vim.command("normal! a\n\n>>> user\n\n")
diff --git a/py/complete.py b/py/complete.py
index b0b0dc5..eea2add 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -13,8 +13,17 @@ prompt = vim.eval("prompt").strip()
 openai.api_key = load_api_key()
 
 def complete_engine(prompt):
-    response = openai.Completion.create(stream=True, prompt=prompt, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0].get('text', ''), response)
+    request = {
+        'stream': True,
+        'prompt': prompt,
+        **request_options
+    }
+    printDebug("[engine-complete] request: {}", request)
+    response = openai.Completion.create(**request)
+    def map_chunk(resp):
+        printDebug("[engine-complete] response: {}", resp)
+        return resp['choices'][0].get('text', '')
+    text_chunks = map(map_chunk, response)
     return text_chunks
 
 def chat_engine(prompt):
@@ -22,8 +31,17 @@
     initial_prompt = '\n'.join(initial_prompt)
     chat_content = f"{initial_prompt}\n\n>>> user\n\n{prompt}".strip()
     messages = parse_chat_messages(chat_content)
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
-    text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response)
+    request = {
+        'stream': True,
+        'messages': messages,
+        **request_options
+    }
+    printDebug("[engine-chat] request: {}", request)
+    response = openai.ChatCompletion.create(**request)
+    def map_chunk(resp):
+        printDebug("[engine-chat] response: {}", resp)
+        return resp['choices'][0]['delta'].get('content', '')
+    text_chunks = map(map_chunk, response)
     return text_chunks
 
 engines = {"chat": chat_engine, "complete": complete_engine}
diff --git a/py/utils.py b/py/utils.py
index 2291a26..5e560b3 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -1,6 +1,10 @@
+import datetime
 import sys
 import os
 
+is_debugging = vim.bindeval("g:vim_ai_debug") == 1
+debug_log_file = vim.bindeval("g:vim_ai_debug_log_file")
+
 def load_api_key():
     config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
     api_key = os.getenv("OPENAI_API_KEY")
@@ -56,3 +60,9 @@ def parse_chat_messages(chat_content):
 def vim_break_undo_sequence():
     # breaks undo sequence (https://vi.stackexchange.com/a/29087)
     vim.command("let &ul=&ul")
+
+def printDebug(text, *args):
+    if not is_debugging:
+        return
+    with open(debug_log_file, "a") as file:
+        file.write(f"[{datetime.datetime.now()}] " + text.format(*args) + "\n")
```
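The new `printDebug` helper only writes when `g:vim_ai_debug` is set, and it reads the target path from `g:vim_ai_debug_log_file`; both variables are fetched once at script load via `vim.bindeval`. A minimal sketch of enabling the logging from a vimrc (the variable names come from the diff above; the log path is an example value, not a default confirmed by this commit):

```vim
" Enable vim-ai debug logging; both variables are read by py/utils.py above.
let g:vim_ai_debug = 1
" Example path -- any writable file works; printDebug appends to it.
let g:vim_ai_debug_log_file = '/tmp/vim_ai_debug.log'
```

Note the design choice in `printDebug`: callers pass a format string plus arguments, so `text.format(*args)` only runs after the `is_debugging` check. An f-string at the call site would pay the cost of serializing the whole request or streamed chunk on every call, even with debugging switched off.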