From 531a1f646144d195fc58c2756d0109d793e0ae96 Mon Sep 17 00:00:00 2001 From: Martin Bielik Date: Sun, 11 Jun 2023 12:53:56 +0200 Subject: optional max_tokens, fixes #42 --- README.md | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'README.md') diff --git a/README.md b/README.md index fb4de86..8ab679d 100644 --- a/README.md +++ b/README.md @@ -282,6 +282,11 @@ let g:vim_ai_chat = { " - if disabled code indentation will work but AI doesn't always respond with a code block " therefore it could be messed up " - find out more in vim's help `:help paste` +" options.max_tokens +" - note that prompt + max_tokens must be less than the model's token limit, see #42, #46 +" - setting max_tokens to 0 will exclude it from the OpenAI API request parameters, it is +" unclear/undocumented what exactly this does, but it seems to resolve issues when the model +" hits the token limit, which result in `OpenAI: HTTPError 400` ``` ### Using chat engine for completion and edits -- cgit v1.2.3