author    Martin Bielik <martin.bielik@instea.sk>  2024-12-17 09:22:42 +0100
committer Martin Bielik <martin.bielik@instea.sk>  2024-12-17 09:22:42 +0100
commit    44625c9d77f6c44f1a4623402cada772dfaf6f9f (patch)
tree      2e76b00168460bba319bba869f3921c2724e55be
parent    8fde389664ca59773c38dc0ec1a434a98bc2428b (diff)
download  vim-ai-44625c9d77f6c44f1a4623402cada772dfaf6f9f.tar.gz
simplified new role syntax
Diffstat
-rw-r--r--  README.md                   36
-rw-r--r--  autoload/vim_ai_config.vim   3
-rw-r--r--  doc/vim-ai.txt               2
-rw-r--r--  py/context.py               53
-rw-r--r--  roles-example.ini           18
-rw-r--r--  tests/resources/roles.ini   20
6 files changed, 66 insertions, 66 deletions
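The gist of the change: role-file keys lose their `config.` prefix, so `config.options.temperature` becomes `options.temperature`, and a top-level `prompt` key joins the default command configs. As a minimal sketch, flat dotted keys like these can be expanded into the nested config dict the plugin works with — `expand_flat_keys` here is an illustrative helper, not the plugin's actual API:

```python
import configparser

def expand_flat_keys(section):
    """Expand flat keys such as 'options.temperature' into nested dicts."""
    result = {}
    for key, value in section.items():
        node = result
        *parents, leaf = key.split('.')
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return result

roles = configparser.ConfigParser()
roles.read_string("""
[grammar]
prompt = fix spelling and grammar
options.temperature = 0.4
""")
print(expand_flat_keys(roles['grammar']))
# {'prompt': 'fix spelling and grammar', 'options': {'temperature': '0.4'}}
```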
diff --git a/README.md b/README.md
index d8df479..45a97b4 100644
--- a/README.md
+++ b/README.md
@@ -196,14 +196,14 @@ let g:vim_ai_roles_config_file = '/path/to/my/roles.ini'
[grammar]
prompt = fix spelling and grammar
-config.options.temperature = 0.4
+options.temperature = 0.4
[o1-mini]
-config.options.stream = 0
-config.options.model = o1-mini
-config.options.max_completion_tokens = 25000
-config.options.temperature = 1
-config.options.initial_prompt =
+options.stream = 0
+options.model = o1-mini
+options.max_completion_tokens = 25000
+options.temperature = 1
+options.initial_prompt =
```
Now you can select text and run it with the command `:AIEdit /grammar`.
@@ -290,6 +290,7 @@ If you answer with code, do not wrap it in a markdown code block.
END
" :AI
+" - prompt: optional prepended prompt
" - engine: chat | complete - see how to configure complete engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
@@ -299,6 +300,7 @@ END
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
let g:vim_ai_complete = {
+\ "prompt": "",
\ "engine": "chat",
\ "options": {
\ "model": "gpt-4o",
@@ -319,6 +321,7 @@ let g:vim_ai_complete = {
\}
" :AIEdit
+" - prompt: optional prepended prompt
" - engine: chat | complete - see how to configure complete engine in the section below
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
@@ -328,6 +331,7 @@ let g:vim_ai_complete = {
" - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
" - ui.paste_mode: use paste mode (see more info in the Notes below)
let g:vim_ai_edit = {
+\ "prompt": "",
\ "engine": "chat",
\ "options": {
\ "model": "gpt-4o",
@@ -356,6 +360,7 @@ If you attach a code block, add the syntax type after ``` to enable syntax highlighting.
END
" :AIChat
+" - prompt: optional prepended prompt
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.request_timeout: request timeout in seconds
@@ -367,6 +372,7 @@ END
" - ui.scratch_buffer_keep_open: re-use scratch buffer within the vim session
" - ui.paste_mode: use paste mode (see more info in the Notes below)
let g:vim_ai_chat = {
+\ "prompt": "",
\ "options": {
\ "model": "gpt-4o",
\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
@@ -423,19 +429,19 @@ Then you set up a custom role that points to the OpenRouter endpoint:
```ini
[gemini]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = google/gemini-exp-1121:free
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = google/gemini-exp-1121:free
[llama]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = meta-llama/llama-3.3-70b-instruct
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = meta-llama/llama-3.3-70b-instruct
[claude]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = anthropic/claude-3.5-haiku
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = anthropic/claude-3.5-haiku
```
Now you can use the role:
diff --git a/autoload/vim_ai_config.vim b/autoload/vim_ai_config.vim
index 50e36ab..7264ced 100644
--- a/autoload/vim_ai_config.vim
+++ b/autoload/vim_ai_config.vim
@@ -9,6 +9,7 @@ Do not provide any explanation or comments if not requested.
If you answer with code, do not wrap it in a markdown code block.
END
let g:vim_ai_complete_default = {
+\ "prompt": "",
\ "engine": "chat",
\ "options": {
\ "model": "gpt-4o",
@@ -28,6 +29,7 @@ let g:vim_ai_complete_default = {
\ },
\}
let g:vim_ai_edit_default = {
+\ "prompt": "",
\ "engine": "chat",
\ "options": {
\ "model": "gpt-4o",
@@ -54,6 +56,7 @@ You are a general assistant.
If you attach a code block, add the syntax type after ``` to enable syntax highlighting.
END
let g:vim_ai_chat_default = {
+\ "prompt": "",
\ "options": {
\ "model": "gpt-4o",
\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
diff --git a/doc/vim-ai.txt b/doc/vim-ai.txt
index 03c7e4d..1747965 100644
--- a/doc/vim-ai.txt
+++ b/doc/vim-ai.txt
@@ -198,7 +198,7 @@ Example of a role: >
[grammar]
prompt = fix spelling and grammar
- config.options.temperature = 0.4
+ options.temperature = 0.4
Now you can select text and run it with the command `:AIEdit /grammar`.
See roles-example.ini for more examples.
diff --git a/py/context.py b/py/context.py
index 87c3a14..50fa170 100644
--- a/py/context.py
+++ b/py/context.py
@@ -39,31 +39,23 @@ def load_roles_with_deprecated_syntax(roles, role):
return {
'role_default': {
'prompt': prompt,
- 'config': {
- 'options': dict(roles.get(f"{role}.options", {})),
- 'ui': dict(roles.get(f"{role}.ui", {})),
- },
+ 'options': dict(roles.get(f"{role}.options", {})),
+ 'ui': dict(roles.get(f"{role}.ui", {})),
},
'role_complete': {
'prompt': prompt,
- 'config': {
- 'options': dict(roles.get(f"{role}.options-complete", {})),
- 'ui': dict(roles.get(f"{role}.ui-complete", {})),
- },
+ 'options': dict(roles.get(f"{role}.options-complete", {})),
+ 'ui': dict(roles.get(f"{role}.ui-complete", {})),
},
'role_edit': {
'prompt': prompt,
- 'config': {
- 'options': dict(roles.get(f"{role}.options-edit", {})),
- 'ui': dict(roles.get(f"{role}.ui-edit", {})),
- },
+ 'options': dict(roles.get(f"{role}.options-edit", {})),
+ 'ui': dict(roles.get(f"{role}.ui-edit", {})),
},
'role_chat': {
'prompt': prompt,
- 'config': {
- 'options': dict(roles.get(f"{role}.options-chat", {})),
- 'ui': dict(roles.get(f"{role}.ui-chat", {})),
- },
+ 'options': dict(roles.get(f"{role}.options-chat", {})),
+ 'ui': dict(roles.get(f"{role}.ui-chat", {})),
},
}
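For comparison, here is the shape the deprecated loader used to build versus what it builds after this change — the values are illustrative, only the structure is taken from the diff above:

```python
# Before: options/ui nested under an intermediate 'config' key.
old_shape = {
    'role_default': {
        'prompt': 'fix spelling and grammar',
        'config': {
            'options': {'temperature': '0.4'},
            'ui': {},
        },
    },
}

# After: options/ui sit directly on the role entry, mirroring the
# flattened INI syntax.
new_shape = {
    'role_default': {
        'prompt': 'fix spelling and grammar',
        'options': {'temperature': '0.4'},
        'ui': {},
    },
}
```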
@@ -120,20 +112,18 @@ def parse_prompt_and_role_config(user_instruction, command_type):
roles = parse_role_names(user_instruction)
if not roles:
# does not require role
- return (user_instruction, '', {})
+ return (user_instruction, {})
last_role = roles[-1]
user_prompt = user_instruction[user_instruction.index(last_role) + len(last_role):].strip() # strip roles
parsed_role = merge_deep([load_role_config(role) for role in roles])
- role_default = parsed_role['role_default']
- role_command = parsed_role['role_' + command_type]
- config = merge_deep([role_default.get('config', {}), role_command.get('config', {})])
- role_prompt = role_default.get('prompt') or role_command.get('prompt', '')
- return user_prompt, role_prompt, config
-
-def make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary):
- if not user_prompt and not role_prompt:
+ config = merge_deep([parsed_role['role_default'], parsed_role['role_' + command_type]])
+ role_prompt = config.get('prompt', '')
+ return user_prompt, config
+
+def make_selection_prompt(user_selection, user_prompt, config_prompt, selection_boundary):
+ if not user_prompt and not config_prompt:
return user_selection
elif user_selection:
if selection_boundary and selection_boundary not in user_selection:
@@ -142,15 +132,15 @@ def make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary):
return user_selection
return ''
-def make_prompt(role_prompt, user_prompt, user_selection, selection_boundary):
+def make_prompt(config_prompt, user_prompt, user_selection, selection_boundary):
user_prompt = user_prompt.strip()
delimiter = ":\n" if user_prompt and user_selection else ""
- user_selection = make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary)
+ user_selection = make_selection_prompt(user_selection, user_prompt, config_prompt, selection_boundary)
prompt = f"{user_prompt}{delimiter}{user_selection}"
- if not role_prompt:
+ if not config_prompt:
return prompt
delimiter = '' if prompt.startswith(':') else ':\n'
- prompt = f"{role_prompt}{delimiter}{prompt}"
+ prompt = f"{config_prompt}{delimiter}{prompt}"
return prompt
def make_ai_context(params):
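To see what the rename changes in practice, here is a trimmed-down, standalone version of `make_prompt` with the boundary handling omitted — a sketch for illustration, not the module as shipped:

```python
def make_prompt(config_prompt, user_prompt, user_selection):
    # Simplified: selection_boundary wrapping is left out.
    user_prompt = user_prompt.strip()
    delimiter = ":\n" if user_prompt and user_selection else ""
    prompt = f"{user_prompt}{delimiter}{user_selection}"
    if not config_prompt:
        return prompt
    delimiter = '' if prompt.startswith(':') else ':\n'
    return f"{config_prompt}{delimiter}{prompt}"

print(make_prompt('fix spelling and grammar', '', 'teh sentnce'))
# fix spelling and grammar:
# teh sentnce
```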
@@ -160,10 +150,11 @@ def make_ai_context(params):
user_selection = params['user_selection']
command_type = params['command_type']
- user_prompt, role_prompt, role_config = parse_prompt_and_role_config(user_instruction, command_type)
+ user_prompt, role_config = parse_prompt_and_role_config(user_instruction, command_type)
final_config = merge_deep([config_default, config_extension, role_config])
selection_boundary = final_config['options']['selection_boundary']
- prompt = make_prompt(role_prompt, user_prompt, user_selection, selection_boundary)
+ config_prompt = final_config.get('prompt', '')
+ prompt = make_prompt(config_prompt, user_prompt, user_selection, selection_boundary)
return {
'config': final_config,
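`make_ai_context` now relies on `merge_deep` to fold the role config — `prompt` included — straight into `final_config`. Assuming `merge_deep` recursively merges a list of dicts with later entries winning (the plugin's actual implementation may differ), a minimal sketch:

```python
def merge_two(a, b):
    """Recursively merge b into a copy of a; b's leaves win."""
    out = dict(a)
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge_two(out[key], value)
        else:
            out[key] = value
    return out

def merge_deep(dicts):
    out = {}
    for d in dicts:
        out = merge_two(out, d)
    return out

default = {'prompt': '', 'options': {'model': 'gpt-4o', 'temperature': 0.1}}
role = {'prompt': 'fix spelling and grammar', 'options': {'temperature': 0.4}}
print(merge_deep([default, role]))
# {'prompt': 'fix spelling and grammar',
#  'options': {'model': 'gpt-4o', 'temperature': 0.4}}
```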
diff --git a/roles-example.ini b/roles-example.ini
index 6134aca..41ec899 100644
--- a/roles-example.ini
+++ b/roles-example.ini
@@ -14,18 +14,18 @@ prompt =
please refactor it in a more clean and concise way so that my colleagues
can maintain the code more easily. Also, explain why you want to refactor
the code so that I can add the explanation to the Pull Request.
-config.options.temperature = 0.4
+options.temperature = 0.4
# command specific options:
[refactor.chat]
-config.options.model = gpt-4o
+options.model = gpt-4o
[refactor.complete]
-config.options.model = gpt-4
+options.model = gpt-4
[refactor.edit]
-config.options.model = gpt-4
+options.model = gpt-4
[o1-mini]
-config.options.stream = 0
-config.options.model = o1-mini
-config.options.max_completion_tokens = 25000
-config.options.temperature = 1
-config.options.initial_prompt =
+options.stream = 0
+options.model = o1-mini
+options.max_completion_tokens = 25000
+options.temperature = 1
+options.initial_prompt =
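The `[refactor]` / `[refactor.chat]` split above means the base section is merged first and the command-specific section second, so for `:AIChat /refactor` the effective options come out roughly as follows (reusing the `merge_deep` sketch from earlier; values copied from the example file):

```python
base = {'options': {'temperature': '0.4'}}
chat = {'options': {'model': 'gpt-4o'}}
# merge_deep([base, chat]) ->
# {'options': {'temperature': '0.4', 'model': 'gpt-4o'}}
```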
diff --git a/tests/resources/roles.ini b/tests/resources/roles.ini
index 450df1d..b677462 100644
--- a/tests/resources/roles.ini
+++ b/tests/resources/roles.ini
@@ -1,22 +1,22 @@
[test-role-simple]
prompt = simple role prompt
-config.options.model = o1-preview
+options.model = o1-preview
[test-role]
-config.options.model = model-common
-config.ui.paste_mode = 0
+options.model = model-common
+ui.paste_mode = 0
[test-role.chat]
-config.options.endpoint_url = https://localhost/chat
-config.ui.open_chat_command = preset_tab
+options.endpoint_url = https://localhost/chat
+ui.open_chat_command = preset_tab
[test-role.complete]
-config.engine = complete
-config.options.endpoint_url = https://localhost/complete
+engine = complete
+options.endpoint_url = https://localhost/complete
[test-role.edit]
-config.engine = complete
-config.options.endpoint_url = https://localhost/edit
+engine = complete
+options.endpoint_url = https://localhost/edit
[chat-only-role.chat]
-config.options.open_chat_command = preset_tab
+options.open_chat_command = preset_tab
[deprecated-test-role-simple]
prompt = simple role prompt
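A hedged sketch of how the updated fixture might be exercised — a pytest-style check against the new flat keys; the repository's real test harness and helpers will differ:

```python
import configparser

def test_new_role_syntax():
    roles = configparser.ConfigParser()
    roles.read('tests/resources/roles.ini')
    simple = roles['test-role-simple']
    assert simple['prompt'] == 'simple role prompt'
    assert simple['options.model'] == 'o1-preview'
    assert roles['test-role.complete']['engine'] == 'complete'
```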