1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
|
from urllib.parse import urljoin
# import utils
# NOTE(review): executing utils.py via `py3file` injects the helper functions
# used below (normalize_config, make_openai_options, make_http_options,
# openai_request, printDebug, render_text_chunks, ...) into this scope.
plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
# Normalize the vim-side config dict, then split it into the engine selection,
# the OpenAI request options, and the HTTP transport options.
config = normalize_config(vim.eval("l:config"))
engine = config['engine']
config_options = config['options']
openai_options = make_openai_options(config_options)
http_options = make_http_options(config_options)
# User prompt prepared on the vim side; surrounding whitespace stripped.
prompt = vim.eval("l:prompt").strip()
def complete_engine(prompt):
    """Stream a legacy /v1/completions request; yield text chunks lazily."""
    payload = {
        'stream': True,
        'prompt': prompt,
        **openai_options,
    }
    printDebug("[engine-complete] request: {}", payload)
    endpoint = urljoin(config_options['base_url'], 'v1/completions')
    stream = openai_request(endpoint, payload, http_options)

    def extract_text(chunk):
        # Each streamed chunk carries the next piece of text in choices[0];
        # fall back to '' for chunks without a 'text' field.
        printDebug("[engine-complete] response: {}", chunk)
        return chunk['choices'][0].get('text', '')

    return (extract_text(chunk) for chunk in stream)
def chat_engine(prompt):
    """Stream a /v1/chat/completions request; yield delta content chunks lazily."""
    # Prepend the configured initial prompt (list of lines) before the user turn.
    initial_prompt = '\n'.join(config_options.get('initial_prompt', []))
    chat_content = f"{initial_prompt}\n\n>>> user\n\n{prompt}".strip()
    messages = parse_chat_messages(chat_content)
    payload = {
        'stream': True,
        'messages': messages,
        **openai_options,
    }
    printDebug("[engine-chat] request: {}", payload)
    endpoint = urljoin(config_options['base_url'], 'v1/chat/completions')
    stream = openai_request(endpoint, payload, http_options)

    def extract_text(chunk):
        # Chat stream chunks carry incremental text under choices[0].delta;
        # fall back to '' for chunks without a 'content' field.
        printDebug("[engine-chat] response: {}", chunk)
        return chunk['choices'][0]['delta'].get('content', '')

    return (extract_text(chunk) for chunk in stream)
# Dispatch table: configured engine name -> streaming generator of text chunks.
engines = {"chat": chat_engine, "complete": complete_engine}
try:
    if prompt:
        print('Completing...')
        # Force vim to repaint so the status message is visible before streaming.
        vim.command("redraw")
        text_chunks = engines[engine](prompt)
        render_text_chunks(text_chunks)
        clear_echo_message()
except BaseException as error:
    # BaseException (not Exception) so KeyboardInterrupt — the user cancelling
    # mid-stream — is also routed through the shared error handler.
    handle_completion_error(error)
    printDebug("[complete] error: {}", traceback.format_exc())
|