summaryrefslogtreecommitdiff
path: root/py/utils.py
diff options
context:
space:
mode:
authorMartin Bielik <martin.bielik@instea.sk>2024-12-15 23:32:55 +0100
committerMartin Bielik <martin.bielik@instea.sk>2024-12-16 00:08:23 +0100
commitcb2ac5f7a672faeb7b81886b6c1f1481ef51f90d (patch)
tree12a9045749c3468e57555a13320871937bbb54e7 /py/utils.py
parentf26bee941bf9e5a5452ee0d75e7f2f2ea3c5216a (diff)
downloadvim-ai-cb2ac5f7a672faeb7b81886b6c1f1481ef51f90d.tar.gz
refactoring: import python when needed, run as functions
Diffstat (limited to 'py/utils.py')
-rw-r--r--py/utils.py46
1 files changed, 19 insertions, 27 deletions
diff --git a/py/utils.py b/py/utils.py
index ae32bc2..ce95ed6 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -13,12 +13,17 @@ from urllib.error import HTTPError
import traceback
import configparser
-is_debugging = vim.eval("g:vim_ai_debug") == "1"
-debug_log_file = vim.eval("g:vim_ai_debug_log_file")
+utils_py_imported = True
+
+def is_ai_debugging():
+ return vim.eval("g:vim_ai_debug") == "1"
class KnownError(Exception):
pass
+def unwrap(input_var):
+ return vim.eval(input_var)
+
def load_api_key(config_token_file_path):
# token precedence: config file path, global file path, env variable
global_token_file_path = vim.eval("g:vim_ai_token_file_path")
@@ -72,32 +77,19 @@ def make_http_options(options):
'token_file_path': options['token_file_path'],
}
-# During text manipulation in Vim's visual mode, we utilize "normal! c" command. This command deletes the highlighted text,
-# immediately followed by entering insert mode where it generates desirable text.
-
-# Normally, Vim contemplates the position of the first character in selection to decide whether to place the entered text
-# before or after the cursor. For instance, if the given line is "abcd", and "abc" is selected for deletion and "1234" is
-# written in its place, the result is as expected "1234d" rather than "d1234". However, if "bc" is chosen for deletion, the
-# achieved output is "a1234d", whereas "1234ad" is not.
-
-# Despite this, post Vim script's execution of "normal! c", it takes an exit immediately returning to the normal mode. This
-# might trigger a potential misalignment issue especially when the most extreme left character is the line’s second character.
-
-# To avoid such pitfalls, the method "need_insert_before_cursor" checks not only the selection status, but also the character
-# at the first position of the highlighting. If the selection is off or the first position is not the second character in the line,
-# it determines no need for prefixing the cursor.
-def need_insert_before_cursor(is_selection):
- if is_selection == False:
- return False
+# when running AIEdit on selection and cursor ends on the first column, it needs to
+# be decided whether to append (a) or insert (i) to prevent misalignment.
+# Example: helloxxx<Esc>hhhvb:AIE translate<CR> - expected Holaxxx, not xHolaxx
+def need_insert_before_cursor():
pos = vim.eval("getpos(\"'<\")[1:2]")
if not isinstance(pos, list) or len(pos) != 2:
raise ValueError("Unexpected getpos value, it should be a list with two elements")
return pos[1] == "1" # determines if visual selection starts on the first window column
-def render_text_chunks(chunks, is_selection):
+def render_text_chunks(chunks):
generating_text = False
full_text = ''
- insert_before_cursor = need_insert_before_cursor(is_selection)
+ insert_before_cursor = need_insert_before_cursor()
for text in chunks:
if not generating_text:
text = text.lstrip() # trim newlines from the beginning
@@ -200,10 +192,10 @@ def vim_break_undo_sequence():
# breaks undo sequence (https://vi.stackexchange.com/a/29087)
vim.command("let &ul=&ul")
-def printDebug(text, *args):
- if not is_debugging:
+def print_debug(text, *args):
+ if not is_ai_debugging():
return
- with open(debug_log_file, "a") as file:
+ with open(vim.eval("g:vim_ai_debug_log_file"), "a") as file:
message = text.format(*args) if len(args) else text
file.write(f"[{datetime.datetime.now()}] " + message + "\n")
@@ -301,7 +293,7 @@ def make_chat_text_chunks(messages, config_options):
'messages': messages,
**openai_options
}
- printDebug("[engine-chat] request: {}", request)
+ print_debug("[engine-chat] request: {}", request)
url = config_options['endpoint_url']
response = openai_request(url, request, http_options)
@@ -315,11 +307,11 @@ def make_chat_text_chunks(messages, config_options):
return choices
def map_chunk_no_stream(resp):
- printDebug("[engine-chat] response: {}", resp)
+ print_debug("[engine-chat] response: {}", resp)
return _choices(resp)[0].get('message', {}).get('content', '')
def map_chunk_stream(resp):
- printDebug("[engine-chat] response: {}", resp)
+ print_debug("[engine-chat] response: {}", resp)
return _choices(resp)[0].get('delta', {}).get('content', '')
map_chunk = map_chunk_stream if openai_options['stream'] else map_chunk_no_stream