diff options
-rw-r--r--  plugin/vim-ai.vim |  7
-rw-r--r--  py/chat.py        | 41
-rw-r--r--  py/complete.py    | 35
-rw-r--r--  py/utils.py       | 37
4 files changed, 69 insertions(+), 51 deletions(-)
diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim index b117598..c2b5faf 100644 --- a/plugin/vim-ai.vim +++ b/plugin/vim-ai.vim @@ -1,4 +1,5 @@ let g:vim_ai_complete_default = { +\ "engine": "complete", \ "options": { \ "model": "text-davinci-003", \ "max_tokens": 1000, @@ -7,6 +8,7 @@ let g:vim_ai_complete_default = { \ }, \} let g:vim_ai_edit_default = { +\ "engine": "complete", \ "options": { \ "model": "text-davinci-003", \ "max_tokens": 1000, @@ -26,9 +28,9 @@ Audience: Users of text editor and programmers that need to transform/generate t <<< assistant Okay - END let g:vim_ai_chat_default = { +\ "engine": "chat", \ "options": { \ "model": "gpt-3.5-turbo", \ "max_tokens": 1000, @@ -90,6 +92,7 @@ function! AIRun(is_selection, ...) range let s:last_instruction = instruction let s:last_is_selection = a:is_selection + let engine = g:vim_ai_complete['engine'] let options = g:vim_ai_complete['options'] let cursor_on_empty_line = trim(join(lines, "\n")) == "" set paste @@ -111,6 +114,7 @@ function! AIEditRun(is_selection, ...) range let s:last_instruction = instruction let s:last_is_selection = a:is_selection + let engine = g:vim_ai_edit['engine'] let options = g:vim_ai_edit['options'] set paste execute "normal! " . a:firstline . "GV" . a:lastline . "Gc" @@ -138,6 +142,7 @@ function! AIChatRun(is_selection, ...) range let s:last_instruction = instruction let s:last_is_selection = a:is_selection + let engine = g:vim_ai_chat['engine'] let options = g:vim_ai_chat['options'] execute "py3file " . 
s:chat_py set nopaste @@ -16,26 +16,8 @@ prompt = f"{initial_prompt}\n{file_content}" lines = prompt.splitlines() messages = [] -def parse_messages(): - file_content = vim.eval('trim(join(getline(1, "$"), "\n"))') - lines = file_content.splitlines() - messages = [] - for line in lines: - if line.startswith(">>> system"): - messages.append({"role": "system", "content": ""}) - continue - if line.startswith(">>> user"): - messages.append({"role": "user", "content": ""}) - continue - if line.startswith("<<< assistant"): - messages.append({"role": "assistant", "content": ""}) - continue - if not messages: - continue - messages[-1]["content"] += "\n" + line - return messages - -messages = parse_messages() +chat_content = vim.eval('trim(join(getline(1, "$"), "\n"))') +messages = parse_chat_messages(chat_content) if not messages: # roles not found, put whole file content as an user prompt @@ -43,11 +25,8 @@ if not messages: vim.command("normal! G") vim.command("let &ul=&ul") # breaks undo sequence (https://vi.stackexchange.com/a/29087) vim.command("redraw") - messages = parse_messages() - -for message in messages: - # strip newlines from the content as it causes empty responses - message["content"] = message["content"].strip() + chat_content = vim.eval('trim(join(getline(1, "$"), "\n"))') + messages = parse_chat_messages(chat_content) try: if messages[-1]["content"].strip(): @@ -58,16 +37,8 @@ try: vim.command("redraw") response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options) - - generating_text = False - for resp in response: - text = resp['choices'][0]['delta'].get('content', '') - if not text.strip() and not generating_text: - continue # trim newlines from the beginning - - generating_text = True - vim.command("normal! a" + text) - vim.command("redraw") + text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response) + render_text_chunks(text_chunks) vim.command("normal! 
a\n\n>>> user\n\n") vim.command("redraw") diff --git a/py/complete.py b/py/complete.py index bddd8e6..96ae032 100644 --- a/py/complete.py +++ b/py/complete.py @@ -4,28 +4,35 @@ import openai plugin_root = vim.eval("s:plugin_root") vim.command(f"py3file {plugin_root}/py/utils.py") -prompt = vim.eval("prompt").strip() +engine = vim.eval("engine") +options = make_options() request_options = make_request_options() +prompt = vim.eval("prompt").strip() + openai.api_key = load_api_key() +def complete_engine(): + response = openai.Completion.create(stream=True, prompt=prompt, **request_options) + text_chunks = map(lambda resp: resp['choices'][0].get('text', ''), response) + return text_chunks + +def chat_engine(): + initial_prompt = options.get('initial_prompt', '') + chat_content = f"{initial_prompt}\n\n>>> user\n\n{prompt}".strip() + messages = parse_chat_messages(chat_content) + response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options) + text_chunks = map(lambda resp: resp['choices'][0]['delta'].get('content', ''), response) + return text_chunks + +engines = {"chat": chat_engine, "complete": complete_engine} + try: if prompt: - print('Completing...') vim.command("redraw") - - response = openai.Completion.create(stream=True, prompt=prompt, **request_options) - - generating_text = False - for resp in response: - text = resp['choices'][0].get('text', '') - if not text.strip() and not generating_text: - continue # trim newlines from the beginning - - generating_text = True - vim.command("normal! a" + text) - vim.command("redraw") + text_chunks = engines[engine]() + render_text_chunks(text_chunks) except KeyboardInterrupt: vim.command("normal! 
a Ctrl-C...") except openai.error.Timeout: diff --git a/py/utils.py b/py/utils.py index 02ecfef..68a4ee9 100644 --- a/py/utils.py +++ b/py/utils.py @@ -11,8 +11,11 @@ def load_api_key(): pass return api_key.strip() +def make_options(): + return vim.eval("options") + def make_request_options(): - options = vim.eval("options") + options = make_options() request_options = {} request_options['model'] = options['model'] request_options['max_tokens'] = int(options['max_tokens']) @@ -20,3 +23,35 @@ def make_request_options(): request_options['request_timeout'] = float(options['request_timeout']) return request_options +def render_text_chunks(chunks): + generating_text = False + for text in chunks: + if not text.strip() and not generating_text: + continue # trim newlines from the beginning + generating_text = True + vim.command("normal! a" + text) + vim.command("redraw") + +def parse_chat_messages(chat_content): + lines = chat_content.splitlines() + messages = [] + for line in lines: + if line.startswith(">>> system"): + messages.append({"role": "system", "content": ""}) + continue + if line.startswith(">>> user"): + messages.append({"role": "user", "content": ""}) + continue + if line.startswith("<<< assistant"): + messages.append({"role": "assistant", "content": ""}) + continue + if not messages: + continue + messages[-1]["content"] += "\n" + line + + for message in messages: + # strip newlines from the content as it causes empty responses + message["content"] = message["content"].strip() + + return messages + |