Diffstat (limited to 'py')
-rw-r--r--  py/chat.py     | 30
-rw-r--r--  py/complete.py | 11
-rw-r--r--  py/utils.py    | 52
3 files changed, 48 insertions(+), 45 deletions(-)
diff --git a/py/chat.py b/py/chat.py
index b0a97c0..547c849 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -25,34 +25,12 @@ def initialize_chat_window():
vim_break_undo_sequence()
vim.command("redraw")
-def parse_chat_header_options():
- try:
- options = {}
- lines = vim.eval('getline(1, "$")')
- contains_chat_options = '[chat-options]' in lines
- if contains_chat_options:
- # parse options that are defined in the chat header
- options_index = lines.index('[chat-options]')
- for line in lines[options_index + 1:]:
- if line.startswith('#'):
- # ignore comments
- continue
- if line == '':
- # stop at the end of the region
- break
- (key, value) = line.strip().split('=')
- if key == 'initial_prompt':
- value = value.split('\\n')
- options[key] = value
- return options
- except:
- raise Exception("Invalid [chat-options]")
-
initialize_chat_window()
chat_options = parse_chat_header_options()
options = {**config_options, **chat_options}
-request_options = make_request_options(options)
+openai_options = make_openai_options(options)
+http_options = make_http_options(options)
initial_prompt = '\n'.join(options.get('initial_prompt', []))
initial_messages = parse_chat_messages(initial_prompt)
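The merge on the context line above gives buffer-local `[chat-options]` precedence over the global config, because later entries in a dict literal win. A minimal sketch (the option values are made up for illustration):

    config_options = {'model': 'gpt-3.5-turbo', 'temperature': '0.7'}
    chat_options = {'temperature': '0.2'}  # parsed from the buffer header

    options = {**config_options, **chat_options}
    # later keys win, so the buffer header overrides the config:
    assert options == {'model': 'gpt-3.5-turbo', 'temperature': '0.2'}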
@@ -73,10 +51,10 @@ try:
request = {
'stream': True,
'messages': messages,
- **request_options
+ **openai_options
}
printDebug("[chat] request: {}", request)
- response = openai_request('https://api.openai.com/v1/chat/completions', request)
+ response = openai_request('https://api.openai.com/v1/chat/completions', request, http_options)
def map_chunk(resp):
printDebug("[chat] response: {}", resp)
return resp['choices'][0]['delta'].get('content', '')
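With this split, chat.py spreads only API parameters into the request body and hands transport settings to `openai_request` as a separate argument. A minimal sketch of the resulting call shape (the stub body and sample values are illustrative, not the plugin's real implementation):

    def openai_request(url, data, options):
        # stub: the real function performs a streaming HTTP request
        print(f"POST {url} timeout={options['request_timeout']}s body={data}")

    openai_options = {'model': 'gpt-3.5-turbo', 'max_tokens': 1000, 'temperature': 0.7}
    http_options = {'request_timeout': 20.0}

    request = {
        'stream': True,
        'messages': [{'role': 'user', 'content': 'hello'}],
        **openai_options,  # API parameters only; no transport keys leak into the JSON body
    }
    openai_request('https://api.openai.com/v1/chat/completions', request, http_options)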
diff --git a/py/complete.py b/py/complete.py
index 669ef98..1045720 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -4,7 +4,8 @@ vim.command(f"py3file {plugin_root}/py/utils.py")
engine = vim.eval("l:engine")
config_options = vim.eval("l:options")
-request_options = make_request_options(config_options)
+openai_options = make_openai_options(config_options)
+http_options = make_http_options(config_options)
prompt = vim.eval("l:prompt").strip()
@@ -12,10 +13,10 @@ def complete_engine(prompt):
request = {
'stream': True,
'prompt': prompt,
- **request_options
+ **openai_options
}
printDebug("[engine-complete] request: {}", request)
- response = openai_request('https://api.openai.com/v1/completions', request)
+ response = openai_request('https://api.openai.com/v1/completions', request, http_options)
def map_chunk(resp):
printDebug("[engine-complete] response: {}", resp)
return resp['choices'][0].get('text', '')
@@ -30,10 +31,10 @@ def chat_engine(prompt):
request = {
'stream': True,
'messages': messages,
- **request_options
+ **openai_options
}
printDebug("[engine-chat] request: {}", request)
- response = openai_request('https://api.openai.com/v1/chat/completions', request)
+ response = openai_request('https://api.openai.com/v1/chat/completions', request, http_options)
def map_chunk(resp):
printDebug("[engine-chat] response: {}", resp)
return resp['choices'][0]['delta'].get('content', '')
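The two `map_chunk` bodies above differ because the endpoints stream different chunk shapes: `/v1/completions` puts text under `choices[0].text`, while `/v1/chat/completions` streams deltas under `choices[0].delta.content`. A sketch with hand-written chunks (the payloads are illustrative, not captured responses):

    complete_chunk = {'choices': [{'text': 'Hello'}]}
    chat_chunk = {'choices': [{'delta': {'content': 'Hello'}}]}

    assert complete_chunk['choices'][0].get('text', '') == 'Hello'
    assert chat_chunk['choices'][0]['delta'].get('content', '') == 'Hello'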
diff --git a/py/utils.py b/py/utils.py
index 0703a3b..bd69401 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -7,6 +7,9 @@ import urllib.request
import socket
from urllib.error import URLError
+is_debugging = vim.eval("g:vim_ai_debug") == "1"
+debug_log_file = vim.eval("g:vim_ai_debug_log_file")
+
def load_api_key():
config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
api_key = os.getenv("OPENAI_API_KEY")
@@ -19,16 +22,17 @@ def load_api_key():
raise Exception("Missing OpenAI API key")
return api_key.strip()
-is_debugging = vim.eval("g:vim_ai_debug") == "1"
-debug_log_file = vim.eval("g:vim_ai_debug_log_file")
+def make_openai_options(options):
+ return {
+ 'model': options['model'],
+ 'max_tokens': int(options['max_tokens']),
+ 'temperature': float(options['temperature']),
+ }
-def make_request_options(options):
- request_options = {}
- request_options['model'] = options['model']
- request_options['max_tokens'] = int(options['max_tokens'])
- request_options['temperature'] = float(options['temperature'])
- request_options['request_timeout'] = float(options['request_timeout'])
- return request_options
+def make_http_options(options):
+ return {
+ 'request_timeout': float(options['request_timeout']),
+ }
def render_text_chunks(chunks):
generating_text = False
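Both helpers cast their values because options coming through `vim.eval` arrive as strings. A runnable sketch with the two new helpers inlined (the sample values are made up):

    def make_openai_options(options):
        return {
            'model': options['model'],
            'max_tokens': int(options['max_tokens']),
            'temperature': float(options['temperature']),
        }

    def make_http_options(options):
        return {
            'request_timeout': float(options['request_timeout']),
        }

    config = {'model': 'gpt-3.5-turbo', 'max_tokens': '1000',
              'temperature': '0.7', 'request_timeout': '20'}
    assert make_openai_options(config) == {
        'model': 'gpt-3.5-turbo', 'max_tokens': 1000, 'temperature': 0.7,
    }
    assert make_http_options(config) == {'request_timeout': 20.0}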
@@ -62,6 +66,29 @@ def parse_chat_messages(chat_content):
return messages
+def parse_chat_header_options():
+ try:
+ options = {}
+ lines = vim.eval('getline(1, "$")')
+ contains_chat_options = '[chat-options]' in lines
+ if contains_chat_options:
+ # parse options that are defined in the chat header
+ options_index = lines.index('[chat-options]')
+ for line in lines[options_index + 1:]:
+ if line.startswith('#'):
+ # ignore comments
+ continue
+ if line == '':
+ # stop at the end of the region
+ break
+ (key, value) = line.strip().split('=')
+ if key == 'initial_prompt':
+ value = value.split('\\n')
+ options[key] = value
+ return options
+ except:
+ raise Exception("Invalid [chat-options]")
+
def vim_break_undo_sequence():
# breaks undo sequence (https://vi.stackexchange.com/a/29087)
vim.command("let &ul=&ul")
@@ -76,15 +103,12 @@ OPENAI_RESP_DATA_PREFIX = 'data: '
OPENAI_RESP_DONE = '[DONE]'
OPENAI_API_KEY = load_api_key()
-def openai_request(url, data):
+def openai_request(url, data, options):
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {OPENAI_API_KEY}"
}
- # request_timeout is a leftover from the time when openai-python was used
- # moving it somewhere else would mean a breaking change, for now handling this way
- request_timeout=data['request_timeout']
- del data['request_timeout']
+ request_timeout=options['request_timeout']
req = urllib.request.Request(
url,
data=json.dumps({ **data }).encode("utf-8"),
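The hunk is truncated before the point where `request_timeout` is consumed; presumably it ends up on the `urllib.request.urlopen` call further down. A minimal sketch under that assumption (the function name and the placeholder key are hypothetical, not the plugin's code):

    import json
    import urllib.request

    def openai_request_sketch(url, data, options):
        req = urllib.request.Request(
            url,
            data=json.dumps(data).encode("utf-8"),
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer <OPENAI_API_KEY>",  # placeholder, not a real key
            },
            method="POST",
        )
        # assumption: the timeout from http_options lands on the urlopen call
        with urllib.request.urlopen(req, timeout=options['request_timeout']) as resp:
            for line in resp:  # with 'stream': True the API sends one 'data: ...' line per chunk
                yield line.decode("utf-8")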