| field | value | date |
|---|---|---|
| author | Martin Bielik <mx.bielik@gmail.com> | 2023-03-13 20:52:45 +0100 |
| committer | Martin Bielik <mx.bielik@gmail.com> | 2023-03-13 20:52:45 +0100 |
| commit | 60f123341288df77a466528f2f1875f81c0cc450 (patch) | |
| tree | 975c2c3cc7b72bd1dfe61691af15e9722ef1ea1d /py | |
| parent | 81d6561754df9f7d544bbab0047e09ac27f86728 (diff) | |
| download | vim-ai-60f123341288df77a466528f2f1875f81c0cc450.tar.gz | |
stream complete/edit commands
Diffstat (limited to '')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | py/chat.py | 32 |
| -rw-r--r-- | py/complete.py | 25 |

2 files changed, 36 insertions, 21 deletions
```diff
diff --git a/py/chat.py b/py/chat.py
--- a/py/chat.py
+++ b/py/chat.py
@@ -29,20 +29,24 @@ if not messages:
     file_content = ">>> user\n\n" + file_content
     messages.append({"role": "user", "content": file_content })
 
-vim.command("normal! Go\n<<< assistant\n\n")
-vim.command("redraw")
-
-response = openai.ChatCompletion.create(
-    model="gpt-3.5-turbo",
-    messages=messages,
-    stream=True,
-)
-
-for resp in response:
-    if 'content' in resp['choices'][0]['delta']:
-        text = resp['choices'][0]['delta']['content']
+if messages[-1]["content"].strip():
+
+    vim.command("normal! Go\n<<< assistant\n\n")
+    vim.command("redraw")
+
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=messages,
+        stream=True,
+    )
+
+    generating_text = False
+    for resp in response:
+        text = resp['choices'][0]['delta'].get('content', '')
+        if not text.strip() and not generating_text:
+            continue # trim newlines from the beginning
         vim.command("normal! a" + text)
         vim.command("redraw")
 
-vim.command("normal! a\n\n>>> user\n")
-vim.command("redraw")
+    vim.command("normal! a\n\n>>> user\n\n")
+    vim.command("redraw")
diff --git a/py/complete.py b/py/complete.py
index c37cbd0..705dd90 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -8,11 +8,22 @@ prompt = vim.eval("prompt")
 
 openai.api_key = load_api_key()
 
-response = openai.Completion.create(
-    model="text-davinci-003",
-    prompt=prompt,
-    max_tokens=1000,
-    temperature=0.1
-)
+if prompt.strip():
 
-output = response['choices'][0]['text']
+    response = openai.Completion.create(
+        model="text-davinci-003",
+        prompt=prompt,
+        max_tokens=1000,
+        temperature=0.1,
+        stream=True,
+    )
+
+    generating_text = False
+    for resp in response:
+        text = resp['choices'][0].get('text', '')
+        if not text.strip() and not generating_text:
+            continue # trim newlines from the beginning
+
+        generating_text = True
+        vim.command("normal! a" + text)
+        vim.command("redraw")
```
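For readers skimming the hunks, the streaming loop both files now share looks roughly like the standalone sketch below. It assumes the pre-1.0 `openai` Python package the plugin used at the time (`ChatCompletion.create(..., stream=True)` yielding delta chunks); the `print` call stands in for the plugin's `vim.command("normal! a" + text)` buffer append, and the API key is a placeholder where vim-ai calls `load_api_key()`.

```python
# Minimal sketch of the streaming pattern introduced in this commit, outside of Vim.
# Assumes the pre-1.0 openai package (the 0.x API the plugin used at the time).
import openai

openai.api_key = "sk-..."  # placeholder; the plugin loads this via load_api_key()

messages = [{"role": "user", "content": "Hello"}]

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    stream=True,
)

generating_text = False
for resp in response:
    # Each streamed chunk carries an incremental delta; 'content' may be absent.
    text = resp['choices'][0]['delta'].get('content', '')
    if not text.strip() and not generating_text:
        continue  # trim empty/newline-only chunks from the beginning of the stream
    generating_text = True
    print(text, end='', flush=True)  # vim-ai instead runs: vim.command("normal! a" + text)
```

The `generating_text` flag matters because the first chunks of a streamed response are often empty or newline-only; skipping them keeps stray blank lines from being inserted before the generated text.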