| author | Martin Bielik <mx.bielik@gmail.com> | 2023-06-11 12:53:56 +0200 |
|---|---|---|
| committer | Martin Bielik <mx.bielik@gmail.com> | 2023-06-11 12:53:56 +0200 |
| commit | 531a1f646144d195fc58c2756d0109d793e0ae96 | |
| tree | 1d1368dc5f43cfe0efec6c1e54ae45b7eaee96dc | |
| parent | a167e4888887784adac99a5a15c1a956e7af8de0 | |
| download | vim-ai-531a1f646144d195fc58c2756d0109d793e0ae96.tar.gz | |
optional max_tokens, fixes #42
Diffstat
| -rw-r--r-- | README.md | 5 |
| -rw-r--r-- | py/utils.py | 3 |

2 files changed, 7 insertions, 1 deletion
````diff
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -282,6 +282,11 @@ let g:vim_ai_chat = {
 " - if disabled code indentation will work but AI doesn't always respond with a code block
 "   therefore it could be messed up
 " - find out more in vim's help `:help paste`
+" options.max_tokens
+" - note that prompt + max_tokens must be less than the model's token limit, see #42, #46
+" - setting max_tokens to 0 will exclude it from the OpenAI API request parameters; it is
+"   unclear/undocumented what exactly it does, but it seems to resolve issues when the model
+"   hits the token limit, which otherwise responds with `OpenAI: HTTPError 400`
 ```

 ### Using chat engine for completion and edits
diff --git a/py/utils.py b/py/utils.py
index 09dfaa3..8539347 100644
--- a/py/utils.py
+++ b/py/utils.py
@@ -27,9 +27,10 @@ def load_api_key():
     return api_key.strip()

 def make_openai_options(options):
+    max_tokens = int(options['max_tokens'])
     return {
         'model': options['model'],
-        'max_tokens': int(options['max_tokens']),
+        'max_tokens': max_tokens if max_tokens > 0 else None,
         'temperature': float(options['temperature']),
     }

````
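To make the effect of the patched `make_openai_options` concrete, here is a minimal self-contained sketch of the new behavior. The sample option values and the final `request_params` filtering step are illustrative assumptions; the hunk above does not show how the `None` value is consumed downstream.

```python
# Standalone sketch of the patched make_openai_options from py/utils.py.
def make_openai_options(options):
    # Option values arrive from Vim as strings; after this patch a value
    # of 0 means "do not send max_tokens to the OpenAI API at all".
    max_tokens = int(options['max_tokens'])
    return {
        'model': options['model'],
        'max_tokens': max_tokens if max_tokens > 0 else None,
        'temperature': float(options['temperature']),
    }

# Illustrative option values, not defaults taken from the plugin.
opts = make_openai_options({
    'model': 'gpt-3.5-turbo',
    'max_tokens': '0',
    'temperature': '0.1',
})
print(opts['max_tokens'])  # None

# Hypothetical follow-up step: drop unset parameters before building the
# HTTP request, so max_tokens is excluded rather than sent as null.
request_params = {k: v for k, v in opts.items() if v is not None}
print(request_params)  # {'model': 'gpt-3.5-turbo', 'temperature': 0.1}
```

With `max_tokens` omitted, the completion length is bounded only by the model's context window, which per the commit message appears to resolve the `OpenAI: HTTPError 400` reported in #42 when `prompt + max_tokens` exceeded the model's token limit.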