fix: #3186 enable max_tokens in chat payload

commit d0a1d910d4 (parent 33b97082fa)
Author: Yidadaa
Date:   2023-11-09 03:19:13 +08:00

3 changed files with 5 additions and 4 deletions


@@ -70,6 +70,7 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
+      max_tokens: Math.max(modelConfig.max_tokens, 1024),
     };

     console.log("[Request] openai payload: ", requestPayload);


@@ -76,8 +76,8 @@ export function ModelConfigList(props: {
       >
         <input
           type="number"
-          min={100}
-          max={100000}
+          min={1024}
+          max={512000}
           value={props.modelConfig.max_tokens}
           onChange={(e) =>
             props.updateConfig(
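The onChange handler is cut off by the hunk boundary; a plausible continuation (assumed, not shown in this diff) routes the parsed value through the ModalConfigValidator from the config.ts hunk below:

// Sketch: likely full wiring of the input; the continuation after
// props.updateConfig( is an assumption, not part of this commit.
<input
  type="number"
  min={1024}
  max={512000}
  value={props.modelConfig.max_tokens}
  onChange={(e) =>
    props.updateConfig(
      (config) =>
        (config.max_tokens = ModalConfigValidator.max_tokens(
          e.currentTarget.valueAsNumber,
        )),
    )
  }
/>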


@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
   model: "gpt-3.5-turbo" as ModelType,
   temperature: 0.5,
   top_p: 1,
-  max_tokens: 2000,
+  max_tokens: 8192,
   presence_penalty: 0,
   frequency_penalty: 0,
   sendMemory: true,
@@ -82,7 +82,7 @@ export const ModalConfigValidator = {
     return x as ModelType;
   },
   max_tokens(x: number) {
-    return limitNumber(x, 0, 100000, 2000);
+    return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {
     return limitNumber(x, -2, 2, 0);
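limitNumber itself is not part of this diff; its call sites above imply the signature (value, min, max, fallback). A plausible sketch, assuming the fallback covers non-numeric input:

// Assumed shape of the limitNumber helper used by the validators above.
function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
): number {
  if (isNaN(x)) return defaultValue; // fall back on unparsable input
  return Math.min(max, Math.max(min, x)); // clamp into [min, max]
}

Note that with the new bounds the validator still accepts values below 1024 (its lower bound stays 0); the hard 1024 floor is applied later, in the request payload.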