author     Martin Bielik <martin.bielik@instea.sk>  2024-12-03 22:12:52 +0100
committer  Martin Bielik <martin.bielik@instea.sk>  2024-12-03 22:12:52 +0100
commit     4b549017d2274d62f532c3ad00b7706f78fac824 (patch)
tree       f282aeb663336c0a658332a77ac386c1211d801c
parent     ce66bb43831dfe2d32b578d6abf84976a3c4ce0e (diff)
download   vim-ai-4b549017d2274d62f532c3ad00b7706f78fac824.tar.gz
o1 support - max_completion_tokens
Diffstat
 -rw-r--r--  autoload/vim_ai_config.vim | 1
 -rw-r--r--  py/utils.py                | 9
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/autoload/vim_ai_config.vim b/autoload/vim_ai_config.vim
index 37abd18..6442973 100644
--- a/autoload/vim_ai_config.vim
+++ b/autoload/vim_ai_config.vim
@@ -42,6 +42,7 @@ let g:vim_ai_chat_default = {
\ "model": "gpt-4o",
\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
\ "max_tokens": 0,
+\ "max_completion_tokens": 0,
\ "temperature": 1,
\ "request_timeout": 20,
\ "enable_auth": 1,
diff --git a/py/utils.py b/py/utils.py
index c54f2e5..604494c 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -51,12 +51,17 @@ def normalize_config(config):
 def make_openai_options(options):
     max_tokens = int(options['max_tokens'])
-    return {
+    max_completion_tokens = int(options['max_completion_tokens'])
+    result = {
         'model': options['model'],
-        'max_tokens': max_tokens if max_tokens > 0 else None,
         'temperature': float(options['temperature']),
         'stream': int(options['stream']) == 1,
     }
+    if max_tokens > 0:
+        result['max_tokens'] = max_tokens
+    if max_completion_tokens > 0:
+        result['max_completion_tokens'] = max_completion_tokens
+    return result
 
 def make_http_options(options):
     return {
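
For illustration, a minimal runnable sketch of the patched behaviour (the function body is re-stated from the hunk above; the 'o1-mini' model name and the 4096 limit are assumed example inputs, not part of the patch). With max_completion_tokens set, as o1-style models expect, and max_tokens left at its 0 default, only max_completion_tokens is forwarded to the request options; previously max_tokens was always present, set to None when 0.

    # Sketch only: make_openai_options re-stated from the patched py/utils.py above.
    def make_openai_options(options):
        max_tokens = int(options['max_tokens'])
        max_completion_tokens = int(options['max_completion_tokens'])
        result = {
            'model': options['model'],
            'temperature': float(options['temperature']),
            'stream': int(options['stream']) == 1,
        }
        if max_tokens > 0:
            result['max_tokens'] = max_tokens
        if max_completion_tokens > 0:
            result['max_completion_tokens'] = max_completion_tokens
        return result

    # Assumed example option values (model name and token limit are illustrative only):
    opts = {
        'model': 'o1-mini',
        'max_tokens': '0',
        'max_completion_tokens': '4096',
        'temperature': '1',
        'stream': '1',
    }
    print(make_openai_options(opts))
    # {'model': 'o1-mini', 'temperature': 1.0, 'stream': True, 'max_completion_tokens': 4096}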