forked from XiaoMo/ChatGPT-Next-Web
feat: wont send max_tokens
parent 3b3ebda34b
commit fd2f441e02
@@ -70,7 +70,8 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
-      max_tokens: Math.max(modelConfig.max_tokens, 1024),
+      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
+      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };

     console.log("[Request] openai payload: ", requestPayload);
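The practical effect of the hunk above is that the request payload simply no longer carries a max_tokens field, so the client places no explicit cap on completion length and the API's own defaults apply. Below is a minimal, self-contained TypeScript sketch of what the payload construction looks like after this commit. Only presence_penalty, frequency_penalty, top_p, and the console.log line come from the hunk itself; the ModelConfig shape, the messages value, and the model/temperature fields are assumptions for illustration and may not match the project's real surrounding code.

// Sketch only; field names not shown in the diff are assumptions.
interface ModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number; // still part of the local config, just no longer sent
}

const modelConfig: ModelConfig = {
  model: "gpt-3.5-turbo",
  temperature: 0.7,
  presence_penalty: 0,
  frequency_penalty: 0,
  top_p: 1,
  max_tokens: 2000,
};

const messages = [{ role: "user", content: "Hello" }];

const requestPayload = {
  messages,
  model: modelConfig.model,
  temperature: modelConfig.temperature,
  presence_penalty: modelConfig.presence_penalty,
  frequency_penalty: modelConfig.frequency_penalty,
  top_p: modelConfig.top_p,
  // max_tokens is intentionally omitted: the commit comments it out, so the
  // client no longer sends any explicit completion-length limit.
};

console.log("[Request] openai payload: ", requestPayload);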