summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Bielik <martin.bielik@instea.sk>2024-12-15 10:46:21 +0100
committerMartin Bielik <martin.bielik@instea.sk>2024-12-15 10:58:00 +0100
commit6bf889156f2ca8cecdc14ff8a882e4ed043d152e (patch)
tree4d53b664dc1fd186937824120394cadb90b680e7
parent6554bf7c3c5ff16a4727260a8406f7a989a56598 (diff)
downloadvim-ai-6bf889156f2ca8cecdc14ff8a882e4ed043d152e.tar.gz
unified config parsing + tests
-rw-r--r--.gitignore2
-rw-r--r--autoload/vim_ai.vim44
-rw-r--r--py/chat.py3
-rw-r--r--py/complete.py3
-rw-r--r--py/config.py105
-rw-r--r--py/utils.py108
-rw-r--r--pytest.ini4
-rw-r--r--tests/config_test.py133
-rw-r--r--tests/mocks/vim.py5
-rw-r--r--tests/resources/roles.ini16
10 files changed, 332 insertions, 91 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9072039
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+env
diff --git a/autoload/vim_ai.vim b/autoload/vim_ai.vim
index 14342a0..ec3e8e1 100644
--- a/autoload/vim_ai.vim
+++ b/autoload/vim_ai.vim
@@ -4,6 +4,7 @@ let s:plugin_root = expand('<sfile>:p:h:h')
let s:complete_py = s:plugin_root . "/py/complete.py"
let s:chat_py = s:plugin_root . "/py/chat.py"
let s:roles_py = s:plugin_root . "/py/roles.py"
+let s:config_py = s:plugin_root . "/py/config.py"
" remembers last command parameters to be used in AIRedoRun
let s:last_is_selection = 0
@@ -45,7 +46,7 @@ function! s:OpenChatWindow(open_conf, force_new) abort
execute l:open_cmd
" reuse chat in keep-open mode
- let l:keep_open = g:vim_ai_chat['ui']['scratch_buffer_keep_open']
+ let l:keep_open = g:vim_ai_chat['ui']['scratch_buffer_keep_open'] == '1'
let l:last_scratch_buffer_name = s:GetLastScratchBufferName()
if l:keep_open && bufexists(l:last_scratch_buffer_name) && !a:force_new
let l:current_buffer = bufnr('%')
@@ -102,7 +103,7 @@ endfunction
let s:is_handling_paste_mode = 0
function! s:set_paste(config)
- if !&paste && a:config['ui']['paste_mode']
+ if !&paste && a:config['ui']['paste_mode'] == '1'
let s:is_handling_paste_mode = 1
setlocal paste
endif
@@ -151,8 +152,18 @@ endfunction
" - config - function scoped vim_ai_complete config
" - a:1 - optional instruction prompt
function! vim_ai#AIRun(uses_range, config, ...) range abort
- let l:config = vim_ai_config#ExtendDeep(g:vim_ai_complete, a:config)
let l:instruction = a:0 > 0 ? a:1 : ""
+ let l:config_input = {
+  \  "config_default": g:vim_ai_complete,
+ \ "config_extension": a:config,
+ \ "instruction": l:instruction,
+ \ "command_type": 'complete',
+ \}
+ execute "py3file " . s:config_py
+ execute "py3 make_config('l:config_input', 'l:config_output')"
+ let l:config = l:config_output['config']
+ let l:role_prompt = l:config_output['role_prompt']
+
" l:is_selection used in Python script
let l:is_selection = a:uses_range && a:firstline == line("'<") && a:lastline == line("'>")
let l:selection = s:GetSelectionOrRange(l:is_selection, a:uses_range, a:firstline, a:lastline)
@@ -185,8 +196,18 @@ endfunction
" - config - function scoped vim_ai_edit config
" - a:1 - optional instruction prompt
function! vim_ai#AIEditRun(uses_range, config, ...) range abort
- let l:config = vim_ai_config#ExtendDeep(g:vim_ai_edit, a:config)
let l:instruction = a:0 > 0 ? a:1 : ""
+ let l:config_input = {
+ \ "config_default": g:vim_ai_edit,
+ \ "config_extension": a:config,
+ \ "instruction": l:instruction,
+ \ "command_type": 'complete',
+ \}
+ execute "py3file " . s:config_py
+ execute "py3 make_config('l:config_input', 'l:config_output')"
+ let l:config = l:config_output['config']
+ let l:role_prompt = l:config_output['role_prompt']
+
" l:is_selection used in Python script
let l:is_selection = a:uses_range && a:firstline == line("'<") && a:lastline == line("'>")
let l:selection = s:GetSelectionOrRange(l:is_selection, a:uses_range, a:firstline, a:lastline)
@@ -248,8 +269,18 @@ endfunction
" - config - function scoped vim_ai_chat config
" - a:1 - optional instruction prompt
function! vim_ai#AIChatRun(uses_range, config, ...) range abort
- let l:config = vim_ai_config#ExtendDeep(g:vim_ai_chat, a:config)
- let l:instruction = ""
+ let l:instruction = a:0 > 0 ? a:1 : ""
+ let l:config_input = {
+ \ "config_default": g:vim_ai_chat,
+ \ "config_extension": a:config,
+ \ "instruction": l:instruction,
+ \ "command_type": 'chat',
+ \}
+ execute "py3file " . s:config_py
+ execute "py3 make_config('l:config_input', 'l:config_output')"
+ let l:config = l:config_output['config']
+ let l:role_prompt = l:config_output['role_prompt']
+
" l:is_selection used in Python script
let l:is_selection = a:uses_range && a:firstline == line("'<") && a:lastline == line("'>")
let l:selection = s:GetSelectionOrRange(l:is_selection, a:uses_range, a:firstline, a:lastline)
@@ -260,7 +291,6 @@ function! vim_ai#AIChatRun(uses_range, config, ...) range abort
let l:prompt = ""
if a:0 > 0 || a:uses_range
- let l:instruction = a:0 > 0 ? a:1 : ""
let l:prompt = s:MakePrompt(l:selection, l:instruction, l:config)
endif
diff --git a/py/chat.py b/py/chat.py
index 2aedb13..7cda7c0 100644
--- a/py/chat.py
+++ b/py/chat.py
@@ -4,7 +4,8 @@ import vim
plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
-prompt, config = load_config_and_prompt('chat')
+prompt = make_prompt(vim.eval("l:prompt"), vim.eval("l:role_prompt"))
+config = make_config(vim.eval("l:config"))
config_options = config['options']
config_ui = config['ui']
diff --git a/py/complete.py b/py/complete.py
index 8d85581..9b63b0b 100644
--- a/py/complete.py
+++ b/py/complete.py
@@ -4,7 +4,8 @@ import vim
plugin_root = vim.eval("s:plugin_root")
vim.command(f"py3file {plugin_root}/py/utils.py")
-prompt, config = load_config_and_prompt('complete')
+prompt = make_prompt(vim.eval("l:prompt"), vim.eval("l:role_prompt"))
+config = make_config(vim.eval("l:config"))
config_options = config['options']
config_ui = config['ui']
diff --git a/py/config.py b/py/config.py
new file mode 100644
index 0000000..03eb3ca
--- /dev/null
+++ b/py/config.py
@@ -0,0 +1,105 @@
+import vim
+import re
+import os
+import configparser
+
+def merge_deep_recursive(target, source = {}):
+ source = source.copy()
+ for key, value in source.items():
+ if isinstance(value, dict):
+ target_child = target.setdefault(key, {})
+ merge_deep_recursive(target_child, value)
+ else:
+ target[key] = value
+ return target
+
+def merge_deep(objects):
+ result = {}
+ for o in objects:
+ merge_deep_recursive(result, o)
+ return result
+
+def enhance_roles_with_custom_function(roles):
+ if vim.eval("exists('g:vim_ai_roles_config_function')") == '1':
+ roles_config_function = vim.eval("g:vim_ai_roles_config_function")
+ if not vim.eval("exists('*" + roles_config_function + "')"):
+ raise Exception(f"Role config function does not exist: {roles_config_function}")
+ else:
+ roles.update(vim.eval(roles_config_function + "()"))
+
+def load_role_config(role):
+ roles_config_path = os.path.expanduser(vim.eval("g:vim_ai_roles_config_file"))
+ if not os.path.exists(roles_config_path):
+ raise Exception(f"Role config file does not exist: {roles_config_path}")
+
+ roles = configparser.ConfigParser()
+ roles.read(roles_config_path)
+ roles = dict(roles)
+
+ enhance_roles_with_custom_function(roles)
+
+ if not role in roles:
+ raise Exception(f"Role `{role}` not found")
+
+ options = roles.get(f"{role}.options", {})
+ options_complete = roles.get(f"{role}.options-complete", {})
+ options_chat = roles.get(f"{role}.options-chat", {})
+
+ ui = roles.get(f"{role}.ui", {})
+ ui_complete = roles.get(f"{role}.ui-complete", {})
+ ui_chat = roles.get(f"{role}.ui-chat", {})
+
+ return {
+ 'role': dict(roles[role]),
+ 'config_default': {
+ 'options': dict(options),
+ 'ui': dict(ui),
+ },
+ 'config_complete': {
+ 'options': dict(options_complete),
+ 'ui': dict(ui_complete),
+ },
+ 'config_chat': {
+ 'options': dict(options_chat),
+ 'ui': dict(ui_chat),
+ },
+ }
+
+def parse_role_names(prompt):
+ chunks = re.split(r'[ :]+', prompt)
+ roles = []
+ for chunk in chunks:
+ if not chunk.startswith("/"):
+ break
+ roles.append(chunk)
+ return [raw_role[1:] for raw_role in roles]
+
+def parse_prompt_and_role_config(instruction, command_type):
+ instruction = instruction.strip()
+ roles = parse_role_names(instruction)
+ if not roles:
+ # does not require role
+ return ('', {})
+
+ last_role = roles[-1]
+ role_configs = merge_deep([load_role_config(role) for role in roles])
+ config = merge_deep([role_configs['config_default'], role_configs['config_' + command_type]])
+ role_prompt = role_configs['role'].get('prompt', '')
+ return role_prompt, config
+
+def make_config(input_var, output_var):
+ input_options = vim.eval(input_var)
+ config_default = input_options['config_default']
+ config_extension = input_options['config_extension']
+ instruction = input_options['instruction']
+ command_type = input_options['command_type']
+
+ role_prompt, role_config = parse_prompt_and_role_config(instruction, command_type)
+
+ final_config = merge_deep([config_default, config_extension, role_config])
+
+ output = {}
+ output['config'] = final_config
+ output['role_prompt'] = role_prompt
+ vim.command(f'let {output_var}={output}')
+ return output
diff --git a/py/utils.py b/py/utils.py
index d1855a7..fc888ab 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -44,21 +44,36 @@ def load_api_key(config_token_file_path):
return (api_key, org_id)
-def load_config_and_prompt(command_type):
- prompt, role_options = parse_prompt_and_role(vim.eval("l:prompt"))
- config = vim.eval("l:config")
- config['options'] = {
- **normalize_options(config['options']),
- **normalize_options(role_options['options_default']),
- **normalize_options(role_options['options_' + command_type]),
- }
- return prompt, config
+def strip_roles(prompt):
+ chunks = re.split(r'[ :]+', prompt)
+ roles = []
+ for chunk in chunks:
+ if not chunk.startswith("/"):
+ break
+ roles.append(chunk)
+ if not roles:
+ return prompt
+ last_role = roles[-1]
+ return prompt[prompt.index(last_role) + len(last_role):].strip()
-def normalize_options(options):
+def make_prompt(raw_prompt, role_prompt):
+ prompt = raw_prompt.strip()
+ prompt = strip_roles(prompt)
+
+ if not role_prompt:
+ return prompt
+
+ delim = '' if prompt.startswith(':') else ':\n'
+ prompt = role_prompt + delim + prompt
+
+ return prompt
+
+def make_config(config):
+ options = config['options']
# initial prompt can be both a string and a list of strings, normalize it to list
if 'initial_prompt' in options and isinstance(options['initial_prompt'], str):
options['initial_prompt'] = options['initial_prompt'].split('\n')
- return options
+ return config
def make_openai_options(options):
max_tokens = int(options['max_tokens'])
@@ -302,77 +317,6 @@ def enhance_roles_with_custom_function(roles):
else:
roles.update(vim.eval(roles_config_function + "()"))
-def load_role_config(role):
- roles_config_path = os.path.expanduser(vim.eval("g:vim_ai_roles_config_file"))
- if not os.path.exists(roles_config_path):
- raise Exception(f"Role config file does not exist: {roles_config_path}")
-
- roles = configparser.ConfigParser()
- roles.read(roles_config_path)
-
- enhance_roles_with_custom_function(roles)
-
- if not role in roles:
- raise Exception(f"Role `{role}` not found")
-
- options = roles[f"{role}.options"] if f"{role}.options" in roles else {}
- options_complete =roles[f"{role}.options-complete"] if f"{role}.options-complete" in roles else {}
- options_chat = roles[f"{role}.options-chat"] if f"{role}.options-chat" in roles else {}
-
- return {
- 'role': dict(roles[role]),
- 'options': {
- 'options_default': dict(options),
- 'options_complete': dict(options_complete),
- 'options_chat': dict(options_chat),
- },
- }
-
-empty_role_options = {
- 'options_default': {},
- 'options_complete': {},
- 'options_chat': {},
-}
-
-def parse_roles(prompt):
- chunks = re.split(r'[ :]+', prompt)
- roles = []
- for chunk in chunks:
- if not chunk.startswith("/"):
- break
- roles.append(chunk)
- return [raw_role[1:] for raw_role in roles]
-
-def merge_role_configs(configs):
- merged_options = empty_role_options
- merged_role = {}
- for config in configs:
- options = config['options']
- merged_options = {
- 'options_default': { **merged_options['options_default'], **options['options_default'] },
- 'options_complete': { **merged_options['options_complete'], **options['options_complete'] },
- 'options_chat': { **merged_options['options_chat'], **options['options_chat'] },
- }
- merged_role ={ **merged_role, **config['role'] }
- return { 'role': merged_role, 'options': merged_options }
-
-def parse_prompt_and_role(raw_prompt):
- prompt = raw_prompt.strip()
- roles = parse_roles(prompt)
- if not roles:
- # does not require role
- return (prompt, empty_role_options)
-
- last_role = roles[-1]
- prompt = prompt[prompt.index(last_role) + len(last_role):].strip()
-
- role_configs = [load_role_config(role) for role in roles]
- config = merge_role_configs(role_configs)
- if 'prompt' in config['role'] and config['role']['prompt']:
- delim = '' if prompt.startswith(':') else ':\n'
- prompt = config['role']['prompt'] + delim + prompt
- return (prompt, config['options'])
-
def make_chat_text_chunks(messages, config_options):
openai_options = make_openai_options(config_options)
http_options = make_http_options(config_options)
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..cc50b10
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+pythonpath =
+ ./py
+ ./tests/mocks
diff --git a/tests/config_test.py b/tests/config_test.py
new file mode 100644
index 0000000..7b8d5f1
--- /dev/null
+++ b/tests/config_test.py
@@ -0,0 +1,133 @@
+import vim
+import os
+from config import make_config
+
+dirname = os.path.dirname(__file__)
+
+def default_eval_mock(cmd):
+ match cmd:
+ case 'g:vim_ai_debug_log_file':
+ return '/tmp/vim_ai_debug.log'
+ case 'g:vim_ai_roles_config_file':
+ return dirname + '/resources/roles.ini'
+ case _:
+ return None
+
+default_config = {
+ "options": {
+ "model": "gpt-4o",
+ "endpoint_url": "https://api.openai.com/v1/chat/completions",
+ "max_tokens": "0",
+ "max_completion_tokens": "0",
+ "temperature": "1",
+ "request_timeout": "20",
+ "stream": "1",
+ "enable_auth": "1",
+ "token_file_path": "",
+ "selection_boundary": "",
+ "initial_prompt": "You are a general assistant.",
+ },
+ "ui": {
+ "open_chat_command": "preset_below",
+ "scratch_buffer_keep_open": "0",
+ "populate_options": "0",
+ "code_syntax_enabled": "1",
+ "paste_mode": "1",
+ },
+}
+
+def make_input_mock(mocker, input_options):
+ def eval_mock(cmd):
+ if cmd == 'l:input':
+ return input_options
+ return default_eval_mock(cmd)
+ mocker.patch('vim.eval', eval_mock)
+
+
+def test_default_config(mocker):
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {},
+ 'instruction': 'hello',
+ 'command_type': 'chat',
+ })
+ command_spy = mocker.spy(vim, "command")
+ actual_output = make_config('l:input', 'l:output')
+ expected_output = {
+ 'config': default_config,
+ 'role_prompt': '',
+ }
+ command_spy.assert_called_once_with(f"let l:output={expected_output}")
+ assert expected_output == actual_output
+
+def test_param_config(mocker):
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {
+ 'options': {
+ 'max_tokens': '1000',
+ },
+ },
+ 'instruction': 'hello',
+ 'command_type': 'chat',
+ })
+ actual_config = make_config('l:input', 'l:output')['config']
+ assert '1000' == actual_config['options']['max_tokens']
+ assert 'gpt-4o' == actual_config['options']['model']
+
+def test_role_config(mocker):
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {},
+ 'instruction': '/test-role-simple',
+ 'command_type': 'chat',
+ })
+ config = make_config('l:input', 'l:output')
+ actual_config = config['config']
+ actual_role_prompt = config['role_prompt']
+ assert 'o1-preview' == actual_config['options']['model']
+ assert 'simple role prompt' == actual_role_prompt
+
+def test_role_config_different_commands(mocker):
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {},
+ 'instruction': '/test-role hello',
+ 'command_type': 'chat',
+ })
+ config = make_config('l:input', 'l:output')
+ actual_config = config['config']
+ actual_role_prompt = config['role_prompt']
+ assert 'model-common' == actual_config['options']['model']
+ assert 'https://localhost/chat' == actual_config['options']['endpoint_url']
+ assert '0' == actual_config['ui']['paste_mode']
+ assert 'preset_tab' == actual_config['ui']['open_chat_command']
+ assert '' == actual_role_prompt
+
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {},
+ 'instruction': '/test-role hello',
+ 'command_type': 'complete',
+ })
+ config = make_config('l:input', 'l:output')
+ actual_config = config['config']
+ actual_role_prompt = config['role_prompt']
+ assert 'model-common' == actual_config['options']['model']
+ assert 'https://localhost/complete' == actual_config['options']['endpoint_url']
+ assert '0' == actual_config['ui']['paste_mode']
+ assert '' == actual_role_prompt
+
+def test_multiple_role_configs(mocker):
+ make_input_mock(mocker, {
+ 'config_default': default_config,
+ 'config_extension': {},
+ 'instruction': '/test-role /test-role-simple hello',
+ 'command_type': 'chat',
+ })
+ config = make_config('l:input', 'l:output')
+ actual_config = config['config']
+ actual_role_prompt = config['role_prompt']
+ assert 'o1-preview' == actual_config['options']['model']
+ assert 'https://localhost/chat' == actual_config['options']['endpoint_url']
+ assert 'simple role prompt' == actual_role_prompt
diff --git a/tests/mocks/vim.py b/tests/mocks/vim.py
new file mode 100644
index 0000000..be14b4b
--- /dev/null
+++ b/tests/mocks/vim.py
@@ -0,0 +1,5 @@
+def eval(cmd):
+ pass
+
+def command(cmd):
+ pass
diff --git a/tests/resources/roles.ini b/tests/resources/roles.ini
new file mode 100644
index 0000000..c77d272
--- /dev/null
+++ b/tests/resources/roles.ini
@@ -0,0 +1,16 @@
+[test-role-simple]
+prompt = simple role prompt
+[test-role-simple.options]
+model = o1-preview
+
+[test-role]
+[test-role.options]
+model = model-common
+[test-role.options-chat]
+endpoint_url = https://localhost/chat
+[test-role.options-complete]
+endpoint_url = https://localhost/complete
+[test-role.ui]
+paste_mode = 0
+[test-role.ui-chat]
+open_chat_command = preset_tab