forked from XiaoMo/ChatGPT-Next-Web
fix: #3192 use smaller max_tokens as default
parent d0a1d910d4
commit 87e3d663a2
@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 8192,
+    max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
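For context, a minimal sketch of the default model config after this change. Only the fields visible in the diff are real; the `ModelType` union and the exported constant name are assumptions for illustration, not the repository's actual declarations:

```ts
// Sketch of the post-commit defaults (assumed surrounding shape; field values from the diff).
type ModelType = "gpt-3.5-turbo" | "gpt-4"; // hypothetical union, for illustration only

const DEFAULT_MODEL_CONFIG = {
  model: "gpt-3.5-turbo" as ModelType,
  temperature: 0.5,
  top_p: 1,
  max_tokens: 4000, // lowered from 8192; a smaller default stays within gpt-3.5-turbo's 4k context window
  presence_penalty: 0,
  frequency_penalty: 0,
  sendMemory: true,
};
```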