feat: close #928 summarize with gpt-3.5

Yidadaa 2023-04-20 23:04:58 +08:00
parent 2e9e69d66c
commit 06d503152b
2 changed files with 40 additions and 14 deletions

View File

@@ -1,5 +1,11 @@
 import type { ChatRequest, ChatResponse } from "./api/openai/typing";
-import { Message, ModelConfig, useAccessStore, useChatStore } from "./store";
+import {
+  Message,
+  ModelConfig,
+  ModelType,
+  useAccessStore,
+  useChatStore,
+} from "./store";
 import { showToast } from "./components/ui-lib";
 
 const TIME_OUT_MS = 60000;
@@ -9,6 +15,7 @@ const makeRequestParam = (
   options?: {
     filterBot?: boolean;
     stream?: boolean;
+    model?: ModelType;
   },
 ): ChatRequest => {
   let sendMessages = messages.map((v) => ({
@@ -26,6 +33,11 @@
   // @ts-expect-error
   delete modelConfig.max_tokens;
 
+  // override model config
+  if (options?.model) {
+    modelConfig.model = options.model;
+  }
+
   return {
     messages: sendMessages,
     stream: options?.stream,
@@ -50,7 +62,7 @@ function getHeaders() {
 
 export function requestOpenaiClient(path: string) {
   return (body: any, method = "POST") =>
-    fetch("/api/openai?_vercel_no_cache=1", {
+    fetch("/api/openai", {
       method,
       headers: {
         "Content-Type": "application/json",
@@ -61,8 +73,16 @@ export function requestOpenaiClient(path: string) {
     });
 }
 
-export async function requestChat(messages: Message[]) {
-  const req: ChatRequest = makeRequestParam(messages, { filterBot: true });
+export async function requestChat(
+  messages: Message[],
+  options?: {
+    model?: ModelType;
+  },
+) {
+  const req: ChatRequest = makeRequestParam(messages, {
+    filterBot: true,
+    model: options?.model,
+  });
 
   const res = await requestOpenaiClient("v1/chat/completions")(req);
@@ -204,7 +224,13 @@ export async function requestChatStream(
   }
 }
 
-export async function requestWithPrompt(messages: Message[], prompt: string) {
+export async function requestWithPrompt(
+  messages: Message[],
+  prompt: string,
+  options?: {
+    model?: ModelType;
+  },
+) {
   messages = messages.concat([
     {
       role: "user",
@@ -213,7 +239,7 @@ export async function requestWithPrompt(messages: Message[], prompt: string) {
     },
   ]);
 
-  const res = await requestChat(messages);
+  const res = await requestChat(messages, options);
 
   return res?.choices?.at(0)?.message?.content ?? "";
 }
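For context, the requests-module changes above thread an optional model override from the public helpers down into the request body. Below is a minimal standalone sketch of that override behaviour; the concrete ModelType union and the ModelConfig shape are assumptions for illustration, since the real definitions live in "./store".

// Hypothetical stand-ins for the types imported from "./store" in the diff above.
type ModelType = "gpt-3.5-turbo" | "gpt-4";

interface ModelConfig {
  model: ModelType;
  max_tokens?: number;
}

// Mirrors the "override model config" branch added to makeRequestParam:
// an explicit per-call model wins over the session's configured model.
function resolveModel(config: ModelConfig, override?: ModelType): ModelType {
  return override ?? config.model;
}

console.log(resolveModel({ model: "gpt-4" }, "gpt-3.5-turbo")); // "gpt-3.5-turbo"
console.log(resolveModel({ model: "gpt-4" })); // "gpt-4"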

View File

@@ -535,14 +535,14 @@ export const useChatStore = create<ChatStore>()(
         session.topic === DEFAULT_TOPIC &&
         countMessages(session.messages) >= SUMMARIZE_MIN_LEN
       ) {
-        requestWithPrompt(session.messages, Locale.Store.Prompt.Topic).then(
-          (res) => {
-            get().updateCurrentSession(
-              (session) =>
-                (session.topic = res ? trimTopic(res) : DEFAULT_TOPIC),
-            );
-          },
-        );
+        requestWithPrompt(session.messages, Locale.Store.Prompt.Topic, {
+          model: "gpt-3.5-turbo",
+        }).then((res) => {
+          get().updateCurrentSession(
+            (session) =>
+              (session.topic = res ? trimTopic(res) : DEFAULT_TOPIC),
+          );
+        });
       }
 
       const config = get().config;
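For illustration, a small sketch of how the new options parameter is meant to be used by the topic-summarization path above: the one-off topic request is pinned to gpt-3.5-turbo even if the chat itself is configured for a larger model. The stubbed requestWithPrompt, the prompt text, and the DEFAULT_TOPIC value are assumptions here, not the project's actual definitions.

// Hypothetical stub standing in for the real requestWithPrompt helper.
async function requestWithPrompt(
  messages: { role: string; content: string }[],
  prompt: string,
  options?: { model?: string },
): Promise<string> {
  console.log(`summarizing ${messages.length} message(s) with ${options?.model ?? "default model"}`);
  return "Weekend trip planning";
}

const DEFAULT_TOPIC = "New Conversation"; // assumed default topic string

async function nameSession(messages: { role: string; content: string }[]) {
  // Pin the cheaper model for topic naming, mirroring the store change above.
  const res = await requestWithPrompt(messages, "Summarize the topic", {
    model: "gpt-3.5-turbo",
  });
  return res ? res.trim() : DEFAULT_TOPIC;
}

nameSession([{ role: "user", content: "Let's plan a weekend trip." }]).then(console.log);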