Diffstat (limited to 'py')
-rw-r--r--   py/chat.py       6
-rw-r--r--   py/complete.py   8
-rw-r--r--   py/utils.py     41
3 files changed, 35 insertions(+), 20 deletions(-)
diff --git a/py/chat.py b/py/chat.py
index 9cb07c9..955832b 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -5,8 +5,6 @@ vim.command(f"py3file {plugin_root}/py/utils.py")
config_options = vim.eval("options")
config_ui = vim.eval("ui")
-openai.api_key = load_api_key()
-
def initialize_chat_window():
lines = vim.eval('getline(1, "$")')
contains_user_prompt = '>>> user' in lines
@@ -78,7 +76,7 @@ try:
**request_options
}
printDebug("[chat] request: {}", request)
- response = openai.ChatCompletion.create(**request)
+ response = openai_request('https://api.openai.com/v1/chat/completions', request)
def map_chunk(resp):
printDebug("[chat] response: {}", resp)
return resp['choices'][0]['delta'].get('content', '')
@@ -89,5 +87,3 @@ try:
vim.command("redraw")
except KeyboardInterrupt:
vim.command("normal! a Ctrl-C...")
-except openai.error.Timeout:
- vim.command("normal! aRequest timeout...")
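Note: with openai.ChatCompletion.create replaced by openai_request, map_chunk now operates on raw chat-completions stream chunks rather than SDK objects. A minimal sketch of the chunk shapes it has to tolerate (the sample payloads below are illustrative assumptions, not taken from this commit) and why the .get('content', '') fallback matters:

    # Illustrative chat-completions stream chunks (assumed shapes, trimmed to
    # the fields map_chunk reads); the first and last deltas carry no 'content'.
    example_chunks = [
        {'choices': [{'delta': {'role': 'assistant'}, 'index': 0, 'finish_reason': None}]},
        {'choices': [{'delta': {'content': 'Hel'}, 'index': 0, 'finish_reason': None}]},
        {'choices': [{'delta': {'content': 'lo'}, 'index': 0, 'finish_reason': None}]},
        {'choices': [{'delta': {}, 'index': 0, 'finish_reason': 'stop'}]},
    ]

    def map_chunk(resp):
        # Mirrors the hunk above: fall back to '' when a delta has no content.
        return resp['choices'][0]['delta'].get('content', '')

    assert ''.join(map(map_chunk, example_chunks)) == 'Hello'

The first chunk typically carries only the role and the final one only a finish_reason, so indexing 'content' directly would raise KeyError.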
diff --git a/py/complete.py b/py/complete.py
index f42cebc..8a8bf7d 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -8,8 +8,6 @@ request_options = make_request_options(config_options)
prompt = vim.eval("prompt").strip()
-openai.api_key = load_api_key()
-
def complete_engine(prompt):
request = {
'stream': True,
@@ -17,7 +15,7 @@ def complete_engine(prompt):
**request_options
}
printDebug("[engine-complete] request: {}", request)
- response = openai.Completion.create(**request)
+ response = openai_request('https://api.openai.com/v1/completions', request)
def map_chunk(resp):
printDebug("[engine-complete] response: {}", resp)
return resp['choices'][0].get('text', '')
@@ -35,7 +33,7 @@ def chat_engine(prompt):
**request_options
}
printDebug("[engine-chat] request: {}", request)
- response = openai.ChatCompletion.create(**request)
+ response = openai_request('https://api.openai.com/v1/chat/completions', request)
def map_chunk(resp):
printDebug("[engine-chat] response: {}", resp)
return resp['choices'][0]['delta'].get('content', '')
@@ -52,5 +50,3 @@ try:
render_text_chunks(text_chunks)
except KeyboardInterrupt:
vim.command("normal! a Ctrl-C...")
-except openai.error.Timeout:
- vim.command("normal! aRequest timeout...")
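Note: both engines now build their request bodies by hand and POST them to the endpoints named above. A rough sketch of the two payload shapes implied by these hunks and by make_request_options in utils.py (the prompt/messages fields and all concrete values are assumptions; they are not visible in the context shown here):

    # Placeholder values; the real ones come from vim.eval("options") and the
    # user's prompt, and the model name is only an example.
    request_options = {'model': '<configured model>', 'max_tokens': 1000, 'temperature': 0.1}

    # complete_engine -> POST https://api.openai.com/v1/completions
    completion_request = {
        'stream': True,
        'prompt': 'Hello',                                   # plain string prompt
        **request_options,
    }

    # chat_engine (and chat.py) -> POST https://api.openai.com/v1/chat/completions
    chat_request = {
        'stream': True,
        'messages': [{'role': 'user', 'content': 'Hello'}],  # chat message list
        **request_options,
    }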
diff --git a/py/utils.py b/py/utils.py
index a0003a0..6fd161a 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -1,14 +1,9 @@
import datetime
import sys
import os
-
-try:
- import openai
-except ImportError:
- raise Exception("OpenAI module not found. Please install it with pip.")
-
-is_debugging = vim.eval("g:vim_ai_debug") == "1"
-debug_log_file = vim.eval("g:vim_ai_debug_log_file")
+import json
+import urllib.error
+import urllib.request
def load_api_key():
config_file_path = os.path.join(os.path.expanduser("~"), ".config/openai.token")
@@ -22,12 +17,14 @@ def load_api_key():
raise Exception("Missing OpenAI API key")
return api_key.strip()
+is_debugging = vim.eval("g:vim_ai_debug") == "1"
+debug_log_file = vim.eval("g:vim_ai_debug_log_file")
+
def make_request_options(options):
request_options = {}
request_options['model'] = options['model']
request_options['max_tokens'] = int(options['max_tokens'])
request_options['temperature'] = float(options['temperature'])
- request_options['request_timeout'] = float(options['request_timeout'])
return request_options
def render_text_chunks(chunks):
@@ -71,3 +68,29 @@ def printDebug(text, *args):
return
with open(debug_log_file, "a") as file:
file.write(f"[{datetime.datetime.now()}] " + text.format(*args) + "\n")
+
+OPENAI_RESP_DATA_PREFIX = 'data: '
+OPENAI_RESP_DONE = '[DONE]'
+OPENAI_API_KEY = load_api_key()
+
+def openai_request(url, data):
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {OPENAI_API_KEY}"
+ }
+ req = urllib.request.Request(
+ url,
+ data=json.dumps(data).encode("utf-8"),
+ headers=headers,
+ method="POST",
+ )
+ with urllib.request.urlopen(req) as response:
+ for line_bytes in response:
+ line = line_bytes.decode("utf-8", errors="replace")
+ if line.startswith(OPENAI_RESP_DATA_PREFIX):
+ line_data = line[len(OPENAI_RESP_DATA_PREFIX):-1]
+ if line_data == OPENAI_RESP_DONE:
+ pass
+ else:
+ openai_obj = json.loads(line_data)
+ yield openai_obj
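Note: make_request_options no longer forwards request_timeout, urlopen is called without a timeout, and the openai.error.Timeout handlers removed from chat.py and complete.py have no urllib equivalent, so a stalled connection now blocks until the platform's default socket timeout. A minimal sketch of how an equivalent timeout path could be layered on top of urllib (the helper name, default value, and exception mapping are assumptions, not part of this commit):

    import socket
    import urllib.error
    import urllib.request

    def urlopen_with_timeout(req, request_timeout=20.0):
        # Sketch only: forward a configured timeout to urlopen and surface
        # timeouts as a single exception the callers could map back to the
        # old "Request timeout..." message.
        try:
            return urllib.request.urlopen(req, timeout=float(request_timeout))
        except socket.timeout as err:                 # timed out while reading
            raise TimeoutError("OpenAI request timed out") from err
        except urllib.error.URLError as err:          # timed out while connecting
            if isinstance(err.reason, socket.timeout):
                raise TimeoutError("OpenAI request timed out") from err
            raise

Callers in chat.py and complete.py could then catch TimeoutError where they previously caught openai.error.Timeout.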