summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--py/chat.py24
-rw-r--r--py/complete.py3
2 files changed, 12 insertions, 15 deletions
diff --git a/py/chat.py b/py/chat.py
index 3d69074..a806bf3 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -9,24 +9,20 @@ request_options = make_request_options()
openai.api_key = load_api_key()
-file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
-initial_prompt = '\n'.join(options['initial_prompt'])
-prompt = f"{initial_prompt}\n{file_content}"
-
-lines = prompt.splitlines()
-messages = []
-
-chat_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
-messages = parse_chat_messages(chat_content)
-
-if not messages:
- # roles not found, put whole file content as an user prompt
+lines = vim.eval('getline(1, "$")')
+contains_user_prompt = any(line == '>>> user' for line in lines)
+if not contains_user_prompt:
+    # user role not found, put whole file content as a user prompt
vim.command("normal! ggO>>> user\n")
vim.command("normal! G")
vim.command("let &ul=&ul") # breaks undo sequence (https://vi.stackexchange.com/a/29087)
vim.command("redraw")
- chat_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
- messages = parse_chat_messages(chat_content)
+
+initial_prompt = options.get('initial_prompt', [])
+initial_prompt = '\n'.join(initial_prompt)
+file_content = vim.eval('trim(join(getline(1, "$"), "\n"))')
+chat_content = f"{initial_prompt}\n{file_content}"
+messages = parse_chat_messages(chat_content)
try:
if messages[-1]["content"].strip():
diff --git a/py/complete.py b/py/complete.py
index 96ae032..2f151d3 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -18,7 +18,8 @@ def complete_engine():
return text_chunks
def chat_engine():
- initial_prompt = options.get('initial_prompt', '')
+ initial_prompt = options.get('initial_prompt', [])
+ initial_prompt = '\n'.join(initial_prompt)
chat_content = f"{initial_prompt}\n\n>>> user\n\n{prompt}".strip()
messages = parse_chat_messages(chat_content)
response = openai.ChatCompletion.create(messages=messages, stream=True, **request_options)