From c1868ed4dcbb8acaf637b129f8a0f717a41cf5f2 Mon Sep 17 00:00:00 2001
From: Martin Bielik
Date: Mon, 27 Mar 2023 22:49:07 +0200
Subject: chat initial prompt poc

---
 plugin/vim-ai.vim | 15 +++++++++++++++
 py/chat.py        | 13 ++++++++++---
 py/complete.py    |  4 ++--
 py/utils.py       | 13 ++++++++++---
 4 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/plugin/vim-ai.vim b/plugin/vim-ai.vim
index 5105abd..0672597 100644
--- a/plugin/vim-ai.vim
+++ b/plugin/vim-ai.vim
@@ -14,12 +14,27 @@ let g:vim_ai_edit_default = {
 \  "request_timeout": 20,
 \  },
 \}
+let s:vim_ai_chat_initial_prompt =<< trim END
+>>> user
+
+You are going to play a role of completion engine with following parameters:
+Task: Provide compact code/text completion, generation, transformation or explanation
+Topic: general programming and text editing
+Style: Plain result without any commentary, unless commentary is necessary
+Audience: Users of text editor and programmers that need to transform/generate text
+
+<<< assistant
+
+Okay
+
+END
 let g:vim_ai_chat_default = {
 \  "options": {
 \  "model": "gpt-3.5-turbo",
 \  "max_tokens": 1000,
 \  "temperature": 1,
 \  "request_timeout": 20,
+\  "initial_prompt": s:vim_ai_chat_initial_prompt,
 \  },
 \}
 if !exists('g:vim_ai_complete')
diff --git a/py/chat.py b/py/chat.py
index 0104014..6b7c2e5 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -5,13 +5,20 @@ plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
 
 options = make_options()
-file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
+request_options = make_request_options()
 openai.api_key = load_api_key()
 
-lines = file_content.splitlines()
+file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
+initial_prompt = '\n'.join(options['initial_prompt'])
+prompt = f"{initial_prompt}\n{file_content}"
+
+lines = prompt.splitlines()
 messages = []
+with open('/tmp/prompt.aichat', 'w') as f:
+    f.write(prompt)
+
 
 for line in lines:
     if line.startswith(">>> system"):
         messages.append({"role": "system", "content": ""})
@@ -38,7 +45,7 @@ try:
     print('Answering...')
     vim.command("redraw")
 
-    response = openai.ChatCompletion.create(messages=messages, stream=True, **options)
+    response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)
 
     generating_text = False
     for resp in response:
diff --git a/py/complete.py b/py/complete.py
index e474580..40f6efb 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -5,7 +5,7 @@ plugin_root = vim.eval("s:plugin_root")
 vim.command(f"py3file {plugin_root}/py/utils.py")
 
 prompt = vim.eval("prompt")
-options = make_options()
+request_options = make_request_options()
 
 openai.api_key = load_api_key()
 
@@ -15,7 +15,7 @@ try:
     print('Completing...')
     vim.command("redraw")
 
-    response = openai.Completion.create(stream=True, prompt=prompt, **options)
+    response = openai.Completion.create(stream=True, prompt=prompt, **request_options)
 
     generating_text = False
     for resp in response:
diff --git a/py/utils.py b/py/utils.py
index 8366eec..3e5295a 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -15,7 +15,14 @@ def make_options():
     options_default = vim.eval("options_default")
     options_user = vim.eval("options")
     options = {**options_default, **options_user}
-    options['request_timeout'] = float(options['request_timeout'])
-    options['temperature'] = float(options['temperature'])
-    options['max_tokens'] = int(options['max_tokens'])
     return options
+
+def make_request_options():
+    options = make_options()
+    request_options = {}
+    request_options['model'] = options['model']
+    request_options['max_tokens'] = int(options['max_tokens'])
+    request_options['temperature'] = float(options['temperature'])
+    request_options['request_timeout'] = float(options['request_timeout'])
+    return request_options
+
--
cgit v1.2.3