 plugin/vim-ai.vim | 15 +++++++++++++++
 py/chat.py        | 12 ++++++++++--
 py/complete.py    |  4 ++--
 py/utils.py       | 15 ++++++++-------
 4 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim
index dba5998..b117598 100644
--- a/plugin/vim-ai.vim
+++ b/plugin/vim-ai.vim
@@ -14,12 +14,27 @@ let g:vim_ai_edit_default = {
 \ "request_timeout": 20,
 \ },
 \}
+let s:vim_ai_chat_initial_prompt =<< trim END
+>>> user
+
+You are going to play a role of completion engine with following parameters:
+Task: Provide compact code/text completion, generation, transformation or explanation
+Topic: general programming and text editing
+Style: Plain result without any commentary, unless commentary is necessary
+Audience: Users of text editor and programmers that need to transform/generate text
+
+<<< assistant
+
+Okay
+
+END
 let g:vim_ai_chat_default = {
 \ "options": {
 \ "model": "gpt-3.5-turbo",
 \ "max_tokens": 1000,
 \ "temperature": 1,
 \ "request_timeout": 20,
+\ "initial_prompt": s:vim_ai_chat_initial_prompt,
 \ },
 \ "ui": {
 \ "open_chat_command": "below new | call vim_ai#MakeScratchWindow()"
diff --git a/py/chat.py b/py/chat.py
--- a/py/chat.py
+++ b/py/chat.py
@@ -4,10 +4,18 @@ import openai
 plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
 
-options = make_options()
+options = vim.eval("options")
+request_options = make_request_options()
 
 openai.api_key = load_api_key()
 
+file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
+initial_prompt = '\n'.join(options['initial_prompt'])
+prompt = f"{initial_prompt}\n{file_content}"
+
+lines = prompt.splitlines()
+messages = []
+
 def parse_messages():
     file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
     lines = file_content.splitlines()
@@ -49,7 +57,7 @@ try:
     print('Answering...')
     vim.command("redraw")
 
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **options)
+    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
 
     generating_text = False
     for resp in response:
diff --git a/py/complete.py b/py/complete.py
index 14667b0..bddd8e6 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -5,7 +5,7 @@ plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
 
 prompt = vim.eval("prompt").strip()
-options = make_options()
+request_options = make_request_options()
 
 openai.api_key = load_api_key()
 
@@ -15,7 +15,7 @@ try:
     print('Completing...')
     vim.command("redraw")
 
-    response = openai.Completion.create(stream=True, prompt=prompt, **options)
+    response = openai.Completion.create(stream=True, prompt=prompt, **request_options)
 
     generating_text = False
     for resp in response:
diff --git a/py/utils.py b/py/utils.py
index 814a4f0..02ecfef 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -11,9 +11,12 @@ def load_api_key():
         pass
     return api_key.strip()
 
-def make_options():
-    options = {**vim.eval("options")}
-    options['request_timeout'] = float(options['request_timeout'])
-    options['temperature'] = float(options['temperature'])
-    options['max_tokens'] = int(options['max_tokens'])
-    return options
+def make_request_options():
+    options = vim.eval("options")
+    request_options = {}
+    request_options['model'] = options['model']
+    request_options['max_tokens'] = int(options['max_tokens'])
+    request_options['temperature'] = float(options['temperature'])
+    request_options['request_timeout'] = float(options['request_timeout'])
+    return request_options
+