author    Martin Bielik <mx.bielik@gmail.com>    2024-05-16 22:48:15 +0200
committer GitHub <noreply@github.com>            2024-05-16 22:48:15 +0200
commit    56dc5a54b118727881d225087ff3a20e5b0f6c79 (patch)
tree      0b6b999b7ec8a9c74c91917937bf5d68dc3beeab /README.md
parent    4e7fe9bcbfe6e4c97cd70a531438c8e961a20664 (diff)
parent    11b9b9a0ba59ffc722ae74febebf6dc240a6154f (diff)
download  vim-ai-56dc5a54b118727881d225087ff3a20e5b0f6c79.tar.gz
Merge pull request #99 from Konfekt/patch-3
increase token limit
Diffstat (limited to 'README.md')
-rw-r--r--  README.md  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/README.md b/README.md
index 5731541..d0af65d 100644
--- a/README.md
+++ b/README.md
@@ -256,7 +256,8 @@ temperature=0.2
generate a paragraph of lorem ipsum
```
-Below are listed all available configuration options, along with their default values:
+Below are listed all available configuration options, along with their default values.
+Please note that there isn't any token limit imposed, though 1000 is recommended.
```vim
" :AI
@@ -271,7 +272,7 @@ let g:vim_ai_complete = {
\ "options": {
\ "model": "gpt-3.5-turbo-instruct",
\ "endpoint_url": "https://api.openai.com/v1/completions",
-\ "max_tokens": 1000,
+\ "max_tokens": 0,
\ "temperature": 0.1,
\ "request_timeout": 20,
\ "enable_auth": 1,
@@ -294,7 +295,7 @@ let g:vim_ai_edit = {
\ "options": {
\ "model": "gpt-3.5-turbo-instruct",
\ "endpoint_url": "https://api.openai.com/v1/completions",
-\ "max_tokens": 1000,
+\ "max_tokens": 0,
\ "temperature": 0.1,
\ "request_timeout": 20,
\ "enable_auth": 1,
@@ -327,7 +328,7 @@ let g:vim_ai_chat = {
\ "options": {
\ "model": "gpt-3.5-turbo",
\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
-\ "max_tokens": 1000,
+\ "max_tokens": 0,
\ "temperature": 1,
\ "request_timeout": 20,
\ "enable_auth": 1,
@@ -392,7 +393,7 @@ let chat_engine_config = {
\ "options": {
\ "model": "gpt-3.5-turbo",
\ "endpoint_url": "https://api.openai.com/v1/chat/completions",
-\ "max_tokens": 1000,
+\ "max_tokens": 0,
\ "temperature": 0.1,
\ "request_timeout": 20,
\ "selection_boundary": "",